Example No. 1
def cli(ctx, helium_key, darksky_key, lat, lon, sensor, every):
    """Monitor weather for a lat/lon locaation.

    This sample service shows how you can use an external weather
    service to emit to a virtual sensor in the Helium platform.

    \b
    he-weather  --every <seconds> <sensor> <lat> <lon>

    The given virtual <sensor> is the id of a created Helium virtual
    sensor.

    The optional <seconds> parameter sets how often weather
    information is fetched and posted to Helium. If the
    parameter is not provided, a default (60 seconds) is used.

    This will run the service based on the given lat/lon.

    """
    client = Client(api_token=helium_key)
    sensor = Sensor.find(client, sensor)

    logging.basicConfig()
    scheduler = BlockingScheduler()
    scheduler.add_job(_process_weather, "interval",
                      seconds=every,
                      next_run_time=datetime.now(),
                      args=[darksky_key, lat, lon, sensor])
    click.echo("Checking every {} seconds".format(every))
    scheduler.start()
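A minimal standalone sketch of the pattern used above, assuming only APScheduler is installed: passing next_run_time=datetime.now() to add_job makes the job fire once immediately and then on the regular interval.

from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

def tick():
    # stand-in for _process_weather; prints instead of posting to Helium
    print("tick", datetime.now())

scheduler = BlockingScheduler()
scheduler.add_job(tick, "interval", seconds=60, next_run_time=datetime.now())
scheduler.start()  # blocks until interrupted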
Example No. 2
class ScheduledCheck(Check, metaclass=ABCMeta):
    '''
    An abstract base class for a check that runs based on
    the Scheduler from apscheduler

    Child classes need to implement the check method
    '''
    def __init__(self, queue=None, notify_on_exception=True, name=None, **kwargs):
        '''
        Create a new instance of this Check
        The kwargs are handed over to apscheduler.blocking.BlockingScheduler.add_job
        and decide when the checks are run. For example `trigger='cron', hour=8` will
        run this check every day at 8 o'clock
        '''
        super().__init__(queue=queue, notify_on_exception=notify_on_exception, name=name)

        self.scheduler = BlockingScheduler(
            job_defaults={'misfire_grace_time': 5*60}
        )
        self.scheduler.add_job(self.wrapped_check, **kwargs)

    def run(self):
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()
        self.log.info('Check %s stopped', self.__class__.__name__)
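A hypothetical child class, sketched on the assumption that the Check base class above provides self.log and wrapped_check; the keyword arguments are forwarded verbatim to add_job, as the docstring describes.

class MorningCheck(ScheduledCheck):
    def check(self):
        # runs every day at 08:00 via the forwarded cron kwargs
        self.log.info('Good morning!')

# MorningCheck(trigger='cron', hour=8).run()  # blocks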
Example No. 3
class MonkeyHorde(object):

    def __init__(self, config_file):
        self.config_file = config_file
        self.monkey_list = [
            dict(class_name=ChaosMonkey),
            dict(class_name=SecurityMonkey),
        ]
        self.twitter = self.get_twitter_connector()
        self.scheduler = BlockingScheduler()
        for m in self.monkey_list:
            m['class_name'](config_file, self.scheduler, self.twitter)

    def unleash(self):
        if self.twitter:
            try:
                self.twitter.PostUpdate("I unleashed the evil monkey horde!!!")
            except Exception as e:
                log.exception(e)
        self.scheduler.start()

    def get_twitter_connector(self):
        try:
            credentials = self.config_file.items("twitter")
        except ConfigParser.NoSectionError:
            return None
        return twitter.Api(**dict(credentials))
Example No. 4
def cmd_start(self):
    from apscheduler.schedulers.blocking import BlockingScheduler
    sched = BlockingScheduler()
    with transaction.manager:
        Scheduler.add_all_to_apscheduler(sched, DbSession, user=SYSTEM_UID,
                                         begin_transaction=True)
    sched.print_jobs()
    sched.start()  # blocks, so print the jobs before starting
Example No. 5
def run(self):
    """Run watcher"""
    self.logger.info("Running watcher ...")
    scheduler = BlockingScheduler()
    scheduler.add_job(self.watching, 'interval', seconds=self.config["interval"])
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example No. 6
class MonkeyRunner(object):

    def __init__(self, config_file):
        self.config_file = config_file
        self.monkey_list = [
            dict(class_name=ChaosMonkey),
            dict(class_name=JanitorMonkey),
            dict(class_name=SecurityMonkey),
        ]
        self.scheduler = BlockingScheduler()
        for m in self.monkey_list:
            m['class_name'](config_file, self.scheduler)

    def start(self):
        self.scheduler.start()
Example No. 7
def start_scheduler_tasks(app_obj, celery_tasks):
    """
    Initializes the scheduler and schedules the specified tasks.

    :param app_obj: The application context
    :param celery_tasks: The celery_tasks object as created during application setup
    """
    scheduler = BlockingScheduler()
    scheduler.add_job(func=get_scheduled_tests, trigger="interval", seconds=15,
                      kwargs={'app_obj': app_obj, 'celery_tasks': celery_tasks})

    scheduler.add_job(func=get_test_results_from_db, trigger="interval", seconds=60,
                      kwargs={'celery_tasks': celery_tasks})

    scheduler.add_job(func=get_scheduled_sequence, trigger="interval", seconds=15,
                      kwargs={'app_obj': app_obj, 'celery_tasks': celery_tasks})

    scheduler.add_job(func=check_portal_alive, trigger="interval", seconds=60,
                      kwargs={'app': app_obj})

    scheduler.start()
Example No. 8
def run():
    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 10
        },
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    scheduler = BlockingScheduler()
    scheduler.configure(executors=executors)

    start = datetime.today() - timedelta(days=45)
    start = start.strftime('%Y-%m-%d')
    # source_dir = 'data/industry_sw/'
    industry = 'all'
    max_file_count = 1000
    seq_dim = 20
    input_dim = 5
    out_dim = 8
    # add a job that computes the trend of all stocks
    scheduler.add_job(
        stock_model_sys_v2.select_best_stock_at_yestoday,
        'cron',
        day_of_week='mon-fri',
        hour=3,
        minute=0,
        args=[start, industry, max_file_count, seq_dim, input_dim, out_dim])
    # select the best stock after the market has opened
    scheduler.add_job(
        stock_model_sys_v2.select_best_stock_after_open,
        'cron',
        day_of_week='mon-fri',
        hour=9,
        minute=26,
        args=[start, industry, max_file_count, seq_dim, input_dim, out_dim])

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.remove_all_jobs()
Example No. 9
    def daily_task(self):
        def func():
            day = datetime.datetime.now().strftime('%Y-%m-%d')
            # current deposits
            self.da.init_balance(day, 1)
            self.logger.info('%s daily current-deposit balances computed', day)
            # fixed-term deposits
            self.da.init_balance(day, 2)
            self.logger.info('%s daily fixed-term balances computed', day)
            # wealth-management products
            self.da.init_balance(day, 3)
            self.logger.info('%s daily wealth-management balances computed', day)

        scheduler = BlockingScheduler()
        scheduler.add_job(func, 'cron', day='*', hour='1')  # run daily at 1 AM

        try:
            scheduler.start()
        except Exception as e:
            # TODO: proper handling for execution errors
            self.logger.error('Daily AUM calculation failed: %s', e)
            scheduler.shutdown()
Example No. 10
def main():

    logging.basicConfig(filename='test.log', level=logging.ERROR, \
                    format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')

    #Kill gphoto2
    killgphoto()

    #Declaring/Calculating variables needed
    scheduler = BlockingScheduler(timezone='US/Central')
    hourbound = tl.getConfig('START_HOUR') + '-' + tl.getConfig('FINISH_HOUR')

    #Ensure photo_local_root exists
    if not os.path.exists(tl.getConfig('photo_local_root')):
        os.makedirs(tl.getConfig('photo_local_root'))

    #GP2 Log and Camera Setup
    gp.check_result(gp.use_python_logging())
    context = gp.gp_context_new()
    camera = gp.check_result(gp.gp_camera_new())
    gp.check_result(gp.gp_camera_init(camera, context))

    #Adding job to scheduler
    scheduler.add_job(captureandsave, 'cron', args=[camera,context], \
                      day_of_week='mon-sun', second='*/'+str(tl.getConfig('INTERVAL')), \
                      hour=hourbound)
    print('Press Ctrl+{0} to exit'.format( \
          'Break' if os.name == 'nt' else 'C'))

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
        pass

    #Close Camera
    gp.check_result(gp.gp_camera_exit(camera, context))

    return 0
Example No. 11
def auto_water():
    with open('watering_parameters.txt') as handle:
        wp = json.loads(handle.read())
    with open("next_water.txt", "w+") as f1:
        f1.write("Watering twice a day at " + str(wp["first_water"]) +
                 ":00 and at " + str(wp["second_water"]) + ":00 for " +
                 str(wp["watering_time"]) + " seconds")
    update_logbook("Started auto watering")
    global scheduler
    scheduler = BlockingScheduler()
    scheduler.add_job(pump_on,
                      'cron',
                      day_of_week='mon-sun',
                      hour=wp["first_water"],
                      id='first_water')
    scheduler.add_job(pump_on,
                      'cron',
                      day_of_week='mon-sun',
                      hour=wp["second_water"],
                      id='second_water')
    scheduler.print_jobs()
    scheduler.start()
    return
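The keys read from watering_parameters.txt imply a JSON file shaped roughly like the following (values illustrative; note that watering_time is only echoed in the status message above, not used to schedule a pump-off job):

{"first_water": 8, "second_water": 20, "watering_time": 30}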
Example No. 12
class TaskMgr:
    def __init__(self):
        self.scheduler = BlockingScheduler()
        self.update_time = Moment(hour=0, minute=0, second=0)
        self.list_task = []

    def add_task(self, task):
        self.list_task.append(task)

    def set_moment(self, hour):
        self.update_time.hour = hour

    def _at_the_moment(self):
        now = datetime.now()
        option_moment = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0)
        option_moment = option_moment.replace(hour=self.update_time.hour)
        return self._time_equal(t1=now.timestamp(), t2=option_moment.timestamp(), delta=30)

    def _time_equal(self, t1, t2, delta):
        if t1 > t2 - delta and t1 < t2 + delta:
            return True
        return False

    def _cron(self):
        for task in self.list_task:
            if self._at_the_moment() or not task.flag:
                task.run()

    def start(self, run_immediately=True):
        try:
            if run_immediately:
                for task in self.list_task:
                    task.run()
            self.scheduler.add_job(func=self._cron, trigger='cron', minute=0, second=0)
            self.scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            self.scheduler.shutdown()
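A usage sketch, assuming task objects expose a run() method and a flag attribute, which is what _cron checks:

class DummyTask:
    def __init__(self):
        self.flag = False  # False forces a run on the next _cron tick

    def run(self):
        print('task ran')
        self.flag = True

mgr = TaskMgr()
mgr.set_moment(hour=3)   # do the daily work at 03:00 (within the 30 s window)
mgr.add_task(DummyTask())
mgr.start()              # blocks; _cron fires at the top of every hour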
Example No. 13
class WeatherSign:
    def __init__(self, accuweather_api_key, EL233_dev):
        self.sign = EL233(EL233_dev)
        self.request_params = {
            "apikey": accuweather_api_key,
            "details": "true"
        }
        self.scheduler = None

    base_url = "http://dataservice.accuweather.com/currentconditions/v1/5622_POI"

    def update(self):
        response = requests.get(WeatherSign.base_url,
                                params=self.request_params)
        try:
            response.raise_for_status()
        except requests.HTTPError as e:
            print(e)
        d = json.loads(response.content)
        _time = d[0]['LocalObservationDateTime']
        deg_f = round(d[0]['Temperature']['Imperial']['Value'])
        rh = round(d[0]['RelativeHumidity'])
        print(f'{_time} temp: {deg_f}, humidity: {rh}')
        self.sign.display_temp_and_humidity(temp=deg_f, humidity=rh)

    def update_forever(self):
        if self.scheduler and self.scheduler.running:
            return
        self.scheduler = BlockingScheduler()
        self.scheduler.add_job(WeatherSign.update,
                               trigger='cron',
                               minute="0,30",
                               max_instances=1,
                               coalesce=True,
                               args=[self])
        self.scheduler.start()
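Inside update_forever, passing the bound method would be an equivalent and slightly simpler wiring than the unbound WeatherSign.update plus args=[self] (a sketch):

self.scheduler.add_job(self.update, trigger='cron', minute="0,30",
                       max_instances=1, coalesce=True)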
Example No. 14
def main():
    log_init()

    LOG.info('init cronbackup...')
    scheduler = BlockingScheduler()

    with open(CONFIG_FILE, 'r') as f:
        config = json.load(f)

    backup_config = config.get('backup')
    backup_type = backup_config.get('type')
    backup_dir = backup_config.get('backup_dir')
    backup_hour = backup_config.get('hour')
    backup_minute = backup_config.get('minute')
    backup_interval = backup_config.get('interval_seconds')
    sync_config = config.get('sync')
    for task_config in config.get('tasks'):
        if not task_config.get('enable'):
            continue
        LOG.info('gen cronbackup task: %s' % task_config.get('name'))
        kwargs = {}
        if backup_type == 'cron':
            kwargs = {
                'day_of_week': '0-6',
                'hour': backup_hour,
                'minute': backup_minute
            }
        elif backup_type == 'interval':
            kwargs = {'seconds': backup_interval}
        scheduler.add_job(
            gen_task,
            backup_type,
            args=(backup_dir, sync_config, task_config),
            **kwargs)

    scheduler.start()
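A sketch of the config structure main() reads; the key names come from the .get() calls above, while the values are illustrative:

config = {
    "backup": {"type": "cron", "backup_dir": "/var/backups",
               "hour": 2, "minute": 30, "interval_seconds": 3600},
    "sync": {},
    "tasks": [{"name": "nightly-etc", "enable": True}],
}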
Example No. 15
def collectStats():
    try:
        sock = socket.socket()
        sock.connect((carbonServer, carbonPort))
    except socket.error as err:
        print("Could not connect to %s:%s, error code %s, %s" % (carbonServer, carbonPort, err.errno, err.strerror))
        return 127
    binary = "/opt/nagios/bin/nagiostats"
    stat = ','.join(str(i) for i in stats)
    command = binary + " --mrtg --data=" + stat
    nagprocess = Popen(command, shell=True, stderr=PIPE, stdout=PIPE, universal_newlines=True)
    stdout, stderr = nagprocess.communicate()
    stdout = stdout.splitlines()
    for stat, metaData in stats.items():
        metricName, descr = metaData
        metricValue = stdout.pop(0)
        string = 'datacenter.stats.nagios.%s.%s %s %i\n' % (hostname, metricName, metricValue, calltime)
        sock.send(string.encode())
        print(string)
    sock.close()


if __name__ == "__main__":

  sched = BlockingScheduler()
  sched.add_job(collectStats, 'interval',  seconds=10)
  ret = collectStats()
  try:
    sched.start()
  except (KeyboardInterrupt, SystemExit):
    pass
Example No. 16
class DisseminationPlayer(object):

    MIDNIGHT = datetime.time(0, 0, 0)

    def __init__(self, top_data_dir, index_file, dir_files_to_parse,
                 files_to_parse, job_func, destination):
        """
            :return:
        """
        self._parser = eumetsat.dmon.parsers.xferlog_parser.XferlogParser(
            no_gems_header=True)
        self._dir_files = dir_files_to_parse
        self._files = files_to_parse
        self._job_func = job_func
        self._scheduler = BlockingScheduler()

        res = []
        t = ftimer(Indexer.load_index, [top_data_dir, index_file], {}, res)
        print("Read index in %d seconds." % (t))
        self._index = res[0]

        # can now set the reference time:
        # ref time = now plus a small deferral
        self._defer_time = 5  # seconds
        self._reference_date = datetime.datetime.now() + datetime.timedelta(
            seconds=self._defer_time)

        #destination info (depends on the type of job)
        self._destination = destination

    def add_jobs(self):
        """
          Create the jobs from the reference time
        :return:
        """
        for a_file in self._files:
            f_path = "%s/%s" % (self._dir_files, a_file)
            print("Parsing xferlog file %s" % f_path)
            fd = open(f_path)
            self._parser.set_lines_to_parse(fd)
            for elem in self._parser:
                #print("time = %s, filename = %s\n" % (elem['time'], elem['file']))
                #find file in index
                filepath = self._index.get(elem['file'], None)
                if filepath:
                    #get time difference
                    midnight_date = utc.localize(
                        datetime.datetime.combine(elem['time'].date(),
                                                  self.MIDNIGHT))
                    #print("midnight date = %s ///// elem[time] = %s" % (midnight_date, elem['time']))
                    time_diff = elem['time'] - midnight_date
                    scheduled_date = self._reference_date + time_diff
                    #create job and schedule it with the time difference added to the starting reference time
                    d_trigger = DateTrigger(scheduled_date)

                    self._scheduler.add_job(self._job_func,
                                            d_trigger,
                                            args=[filepath, self._destination])
                else:
                    print("Could not find %s\n in Index" % (elem['file']))

        print("Player. %d jobs scheduled.\n" %
              (len(self._scheduler.get_jobs())))

    def start(self):
        """
        :return:
        """
        self._scheduler.configure(jobstores=jobstores,
                                  executors=executors,
                                  job_defaults=job_defaults,
                                  timezone=utc)

        print("Start Scheduler. Jobs will start to be played in %d sec." %
              self._defer_time)
        self._scheduler.start()
Example No. 17
class ActionScheduler:
    def __init__(self):
        self.scheduler = BlockingScheduler(jobstores=APSCHEDULER_SETTINGS['jobstores'],
                                           executors=APSCHEDULER_SETTINGS['executors'],
                                           job_defaults=APSCHEDULER_SETTINGS['job_defaults'],
                                           timezone=TIMEZONE_PST8PDT)

    def start(self):
        self._add_event_listener()
        # self._add_example_jobs()
        self._add_jobs()
        self.scheduler.start()

    def shutdown(self):
        # self.scheduler.remove_all_jobs()  # save all jobs into sqlite, do not remove them
        self.scheduler.shutdown()

    def _add_event_listener(self):
        self.scheduler.add_listener(ActionScheduler.listener_jobs_status, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.scheduler.add_listener(ActionScheduler.listener_all_jobs_finished, EVENT_ALL_JOBS_REMOVED)

    # examples
    def _add_example_jobs(self):
        import datetime
        self.scheduler.add_job(func=ActionScheduler.job_example, args=["cron", ], trigger='cron', second='*/5',
                               misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, id="cron")
        self.scheduler.add_job(func=ActionScheduler.job_example, args=["interval", ], trigger='interval', seconds=60,
                               misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, id="interval")
        self.scheduler.add_job(func=ActionScheduler.job_example, args=["date", ], trigger='date',
                               run_date=get_cur_time()+datetime.timedelta(seconds=12), id="date")

    # examples
    @staticmethod
    def job_example(job_type):
        print("job_example: {}".format(job_type))

    def _add_jobs(self):
        # add reap alerts immediate job TODO test
        # self.scheduler.add_job(id="reap_alerts_immediate", func=ActionScheduler.job_reap_alerts_and_start_action_tasks, args=[],
        #                        misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, )
        # add reap alerts interval job
        # self.scheduler.add_job(id="reap_alerts", func=ActionScheduler.job_reap_alerts_and_start_action_tasks,
        #                        args=[], trigger='interval', seconds=REAP_INTERVAL_SECONDS,
        #                        misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, )
        # add gather & retry failed action tasks immediate job TODO test
        # self.scheduler.add_job(id="check_tasks_immediate", func=ActionScheduler.job_gather_and_retry_failed_action_tasks, args=[],
        #                        misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, )
        # add gather & retry failed action tasks interval job
        # self.scheduler.add_job(id="check_tasks", func=ActionScheduler.job_gather_and_retry_failed_action_tasks,
        #                        args=[], trigger='interval', seconds=GATHER_FAILED_TASKS_INTERVAL_SECONDS,
        #                        misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, )
        pass

    @staticmethod
    def listener_all_jobs_finished(event):  # rarely invoked in practice
        logger_.info('All jobs are done.')

    @staticmethod
    def listener_jobs_status(event):
        if event.exception:
            logger_.warning('Job {} crashed.'.format(event.job_id))
        else:
            logger_.info('Job {} executed.'.format(event.job_id))
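A hedged sketch of the APSCHEDULER_SETTINGS shape this class consumes; a persistent sqlite jobstore matches the comment in shutdown() about keeping jobs, but the concrete stores and sizes here are assumptions:

from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor

APSCHEDULER_SETTINGS = {
    'jobstores': {'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')},
    'executors': {'default': ThreadPoolExecutor(20)},
    'job_defaults': {'coalesce': True, 'max_instances': 1},
}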
Example No. 18
        thread = Thread(target=dispatching, kwargs={'data': {'cmd':"python3 procQ.py {}".format(t)}})
        thread.start()
        t_list.append(thread)
        if len(t_list) == working_threads:
            print('Waiting for previous task.')
            for t in t_list:
                t.join()
            t_list = []
    for t in t_list:
        t.join()



timez = pytz.timezone('Asia/Taipei')
# scheduler = BackgroundScheduler()
# scheduler.add_job(func=cleanQ, trigger="interval", seconds=60, timezone=timez)
# scheduler.start()

if __name__ == '__main__':
    scheduler = BlockingScheduler()
    scheduler.add_job(func=cleanQ, trigger="interval", seconds=3, timezone=timez)

    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        scheduler.start()  # blocking mode: a single thread is dedicated to the scheduling work
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()
        print('Exit The Job!')
Example No. 19
def run_schedule_sprinkles():
    from devices.services.run_scheduled import run_scheduled_sprinkle

    scheduler = BlockingScheduler()
    scheduler.add_job(run_scheduled_sprinkle, 'interval', minutes=1)
    scheduler.start()
Example No. 20
class Monitor ():

    def __init__ (self, bay = 2, temperature_ctrl = None, wait_email = 20, wait_T_readout = 30):
        self._bay = bay
        self._name = 'bay'+str(bay)
        self._notifications = True
        self._wait_email = wait_email
        self._wait_T_readout = wait_T_readout
        self._offset = 100
        self._pwd = None

        self._temperature_ctrl = temperature_ctrl

        if temperature_ctrl:
            self._Tctrl = temperature_ctrl
        else:
            self._Tctrl = None
            print ("No temperature controller!")

        self._max_T = 10

        self._scheduler = BlockingScheduler()
        self._scheduler.configure(timezone='UTC')
        self._scheduler.add_job(self._check_email, 'interval', seconds=self._wait_email)
        self._scheduler.add_job(self._get_temperature, 'interval', seconds=self._wait_T_readout)

    def login (self):
        try:
            print ("Enter password...")
            self._pwd = getpass.getpass()
            self._email = QPLemail.QPLmail(bay=self._bay, password=self._pwd)
        except Exception:
            print ("Login failed!")

    def set_max_temperature (self, T=10):
        self._max_T = T

    def set_channel (self, channel):
        self._channel = channel

    def _check_email (self):
        msg_dict = self._email.fetch_unread()

        for msg in msg_dict:
            body = msg['body'][0].as_string()
            #print (msg)
            sender = msg['mail_from'][0]
            sender_addr = msg['mail_from'][1]
            #print (sender)
            #print (sender_addr)

            if (body.find ('notifications-off')>0):
                self._deactivate(sender_addr)
            elif (body.find ('notifications-on')>0):
                self._activate(sender_addr)
            elif (body.find('get-temperature')>0):
                T = self._get_temperature()
                # here I need to extract the sender email address, not the name
                self._email.send (to=[sender_addr], 
                            subject='Temperature readout', 
                            message='Current temperature: '+str(self._curr_T)+'K')
            elif (body.find ('send-report')>0):
                self._send_report()
            else:
                print ("None")

    def _send_alarm_email (self):
        email_to = ['*****@*****.**', '*****@*****.**'] 
        #email_to = ['*****@*****.**'] 
        self._email.send (to=email_to, subject='Help!', 
                                message='Current temperature: '+str(self._curr_T)+'K')
        print ("ALARM: temperature = "+str(self._curr_T)+ "K. Email sent to: ")
        print (email_to)

    def _activate(self, sender):
        self._notifications = True
        print ("Notifications activated, as requested by: "+sender)
        self._email.send (to=['*****@*****.**', sender], subject='Settings change', 
                            message='Notifications activated, as requested by '+sender)

    def _deactivate(self, sender):
        self._notifications = False
        print ("Notifications de-activated, as requested by: "+sender)
        self._email.send (to=['*****@*****.**', sender], subject='Settings change', 
                            message='Notifications de-activated, as requested by '+sender)

    def _get_temperature (self, overrule_notifications=False):
        self._curr_T = self._temperature_ctrl.get_kelvin(channel = self._channel)
        #print ("Read temperature: ", self._curr_T)

        if (self._curr_T>self._max_T):
            if (self._notifications):
                self._send_alarm_email()
        return self._curr_T

    def _send_report (self):
        pass

    def start (self):

        print('Press Ctrl+C to exit')

        try:
            self._scheduler.start()

            while True:
                time.sleep(1)
        except (KeyboardInterrupt, SystemExit):
            # Not strictly necessary if daemonic mode is enabled but should be done if possible
            self._scheduler.shutdown()
Example No. 21
    def __init__(self, config, inc_program, q_prog, time_init):
        GPIO.setmode(GPIO.BCM)  # Numbers GPIOs by physical location
        GPIO.setwarnings(False)

        # get pins for stepper & setup
        self.step_pin = config["setup_pin"]["step"]
        self.sleep_pin = config["setup_pin"]["sleep"]
        pins = [self.step_pin, self.sleep_pin]
        for p in pins:
            GPIO.setup(p, GPIO.OUT)
            GPIO.output(p, GPIO.LOW)

        steps_per_round = config["steps"]
        self.step_count = steps_per_round * 32  # a full round in Mode 101
        self.step_delay = .0208 / 32  # controls the speed of the motor

        # read config files
        self.q_prog = q_prog
        self.inc_program = inc_program
        self.time_init = time_init

        self.q_prog.put(inc_program["default_phase"])  # send standard inc_program to controller
        logging.info("Setting controller to default parameters")
        print("Setting controller to default parameters")

        scheduler = BlockingScheduler()  # init scheduler

        # define timepoints of incubation phase changes relative to time_init
        if inc_program["phases"] > 1:
            logging.info("Additional phases found and added to scheduler")
            print("Additional phases found and added to scheduler")
            phase_changes = inc_program["phases"] - 1
            for p in range(phase_changes):
                phase = time_init + timedelta(
                    days=inc_program["phase_changes"][p])

                # add job to scheduler
                scheduler.add_job(self.next_phase,
                                  args=(p, ),
                                  trigger='date',
                                  next_run_time=phase)
                logging.info(f'Controller update scheduled for: {phase}')
                print(f'Controller update scheduled for: {phase}')

        #  set scheduler to interval until end point of egg moving, relative to time_init
        if inc_program["activate_move_eggs"] == 1:  # check if eggs should be moved
            scheduler.add_job(self.move_eggs,
                              trigger='interval',
                              hours=inc_program["interval_move_eggs"],
                              start_date=datetime.now(),
                              end_date=time_init +
                              timedelta(days=inc_program["days_move_eggs"]))
            logging.info(
                f'Egg moving is activated and scheduled every {inc_program["interval_move_eggs"]} hours'
            )
            print(
                f'Egg moving is activated and scheduled every {inc_program["interval_move_eggs"]} hours'
            )
            self.move_eggs()  # move eggs once at start

        # scheduler.print_jobs()
        scheduler.start()
Example No. 22
class St2TimerSensor(Sensor):
    '''
    A timer sensor that uses APScheduler 3.0.
    '''
    def __init__(self, sensor_service=None):
        self._timezone = 'America/Los_Angeles'  # Whatever TZ local box runs in.
        self._sensor_service = sensor_service
        self._log = self._sensor_service.get_logger(self.__class__.__name__)
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}

    def setup(self):
        pass

    def run(self):
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        id = trigger['id']

        try:
            job_id = self._jobs[id]
        except KeyError:
            self._log.info('Job not found: %s', id)
            return

        self._scheduler.remove_job(job_id)

    def _get_trigger_type(self, ref):
        pass

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        if hasattr(time_type, 'run_date') and datetime.now(tzutc()) > time_type.run_date:
            self._log.warning('Not scheduling expired timer: %s : %s',
                              trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            self._log.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        self._log.info('Timer fired at: %s. Trigger: %s', str(datetime.utcnow()), trigger)

        payload = {
            'executed_at': str(datetime.utcnow()),
            'schedule': trigger['parameters'].get('time')
        }
        self._sensor_service.dispatch(trigger, payload)
Example No. 23
from apscheduler.schedulers.blocking import BlockingScheduler
from main import send_message

sched = BlockingScheduler()

# Schedule send_message to be called every two seconds
sched.add_job(send_message, 'interval',
              seconds=2)  # the interval unit can be hours or seconds

sched.start()
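As the inline comment notes, the interval unit is interchangeable; a two-hour version of the same job would be:

sched.add_job(send_message, 'interval', hours=2)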
Example No. 24
class ReminderDaemon(object):
    """Parent Daemon to keep track of scheduled jobs and watch for config file changes."""
    def __init__(self,
                 blocking=True,
                 timezone='UTC',
                 config_path='.',
                 logger_level=None,
                 *args,
                 **kwargs):
        """
        Create ReminderDaemon object.

        :param boolean blocking:
            Determines if Scheduler should be BlockingScheduler or BackgroundScheduler.
        :param str timezone: Timezone for the scheduler to use when scheduling jobs.
        :param str config_path: Path to configuration files.
        :param int logger_level: Level to set logger to.
        """
        self.logger = logging.getLogger(__name__)
        if logger_level:
            self.logger.setLevel(logger_level)
        self.logger.debug('initializing daemon')
        self.scheduler = BlockingScheduler(
            timezone=timezone) if blocking else BackgroundScheduler(
                timezone=timezone)
        self.reminders = []
        self.configs = {}
        self.timezone = timezone
        self._observer = Observer()
        self.config_path = config_path
        self._watchdog_handler = PatternMatchingEventHandler(['*.yaml', '*.yml'])
        self._watchdog_handler.on_created = self.on_created
        self._watchdog_handler.on_modified = self.on_created
        self._watchdog_handler.on_deleted = self.on_deleted
        self._observer.schedule(self._watchdog_handler, self.config_path)

    def start(self):
        """Start the observer and scheduler associated with daemon."""
        self._observer.start()
        self.scheduler.start()

    def add_reminder(self, reminder_config):
        """
        Create new reminder and add to daemon.

        :param dict reminder_config:
            Dictionary configuration for creating Reminder.
            Typically loaded from YAML file.
        """
        reminder_config['daemon'] = self
        reminder = Reminder(**reminder_config)
        self.update(reminder)

    def update(self, reminder):
        """
        Update Daemon with new Reminder object.
        Operates by either appending new reminder or replacing existing reminder.

        :param Reminder reminder: Reminder to be added or updated.
        """
        if reminder not in self.reminders:
            for job in reminder.jobs:
                self.logger.debug('adding job to scheduler: %s', job)
                try:
                    job_def = self.scheduler.add_job(**job)
                    reminder.job_ids.append(job_def.id)
                except TypeError:
                    self.logger.error('Unable to add job to scheduler',
                                      exc_info=True)
            self.reminders.append(reminder)
        else:
            self.remove_reminder(reminder)
            self.update(reminder)

    def remove_reminder(self, reminder):
        """
        Remove reminder from Daemon.

        :param Reminder reminder: The Reminder to be removed.
        """
        for job_id in reminder.job_ids:
            self.scheduler.remove_job(job_id)
        self.reminders.remove(reminder)

    def on_created(self, event):
        """
        Callback for on_created events to be associated with watchdog EventHandler.

        :param event: Event object representing the file system event.
        :event type: watchdog.events.FileSystemEvent
        """
        self.logger.debug('creation event received for {}'.format(
            event.src_path))
        if not event.is_directory:
            path = os.path.basename(event.src_path)
            self.load_yaml(path)
        else:
            self.logger.debug('skipping event because it is directory')

    def load_yaml(self, path):
        """
        Read and process yaml config.

        :param str path: The path of yaml config to load.
        """
        self.logger.debug('loading yaml config from %s', path)
        path = os.path.join(self.config_path, path)
        with open(path) as f:
            config = yaml.safe_load(f.read())
            reminder_config = config.get('reminder')
            self.logger.debug('loaded reminder_config: %s', reminder_config)
            if reminder_config:
                self.add_reminder(reminder_config)
                self.logger.info('loaded reminder config from %s', path)
                self.configs[os.path.basename(path)] = self.reminders[-1]
        # self.configs[path] = config

    def on_deleted(self, event):
        """
        Callback for on_deleted events to be associated with watchdog EventHandler.

        :param event: Event object representing the file system event.
        :event type: watchdog.events.FileSystemEvent
        """
        self.logger.debug('deletion event for %s', event.src_path)
        path = os.path.basename(event.src_path)
        if path in self.configs:
            self.remove_reminder(self.configs[path])
            del self.configs[path]
            self.logger.info('removed config for %s', path)
        else:
            self.logger.debug(
                'No action taken for deletion event because it doesn\'t appear to exist in configs: %s',
                self.configs)
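load_yaml() above expects a YAML document whose top-level reminder mapping is splatted into Reminder(**kwargs); the Reminder class is not shown here, so the field names in this sketch are purely hypothetical:

# reminder.yml -- 'reminder:' is the key load_yaml checks for; fields hypothetical
# reminder:
#   name: standup
#   jobs: []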
Example No. 25
    print(msg)


def test2(symbols):
    quotes = get_quotes(symbols)
    for line in quotes:
        producer.send(topic, line.encode())
        print("send " + line)


if __name__ == '__main__':
    init_db()
    table = quote_table
    key_space = key_space
    session = get_session()

    schedule = BlockingScheduler()
    schedule.add_executor('threadpool')
    producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
    # Assign a topic
    topic = 'test'
    symbols = ['GOOG', 'AAPL', 'FB', 'AMZN', 'MSFT']
    for symbol in symbols:
        schedule.add_job(fetch_stock,
                         'interval',
                         args=[symbol],
                         seconds=30,
                         id=symbol)  # run fetch_stock every 30 seconds
    schedule.start()
    # test2(symbols)
Example No. 26
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.
    """
    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}
        self._trigger_types = list(TIMER_TRIGGER_TYPES.keys())
        self._trigger_watcher = TriggerWatcher(
            create_handler=self._handle_create_trigger,
            update_handler=self._handle_update_trigger,
            delete_handler=self._handle_delete_trigger,
            trigger_types=self._trigger_types,
            queue_suffix=self.__class__.__name__,
            exclusive=True)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            LOG.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        del self._jobs[trigger_id]

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            util_schema.validate(instance=trigger['parameters'],
                                 schema=trigger_type['parameters_schema'],
                                 cls=util_schema.CustomValidator,
                                 use_default=True,
                                 allow_default_none=True)
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'],
                      e,
                      exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        utc_now = date_utils.get_datetime_utc_now()
        if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)
        return time_type

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'],
                      e,
                      exc_info=True)

    def _emit_trigger_instance(self, trigger):
        utc_now = date_utils.get_datetime_utc_now()
        # debug logging is reasonable for this one. A high resolution timer will end up
        # trashing standard logs.
        LOG.debug('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)

        payload = {
            'executed_at': str(utc_now),
            'schedule': trigger['parameters'].get('time')
        }

        trace_context = TraceContext(trace_tag='%s-%s' %
                                     (self._get_trigger_type_name(trigger),
                                      trigger.get('name',
                                                  uuid.uuid4().hex)))
        self._trigger_dispatcher.dispatch(trigger,
                                          payload,
                                          trace_context=trace_context)

    def _get_trigger_type_name(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        return trigger_type['name']

    def _register_timer_trigger_types(self):
        return trigger_services.add_trigger_models(
            list(TIMER_TRIGGER_TYPES.values()))

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        sanitized = TriggerAPI.from_model(trigger).to_dict()
        return sanitized
Example No. 27
def start_scheduler(self):
    # set the timezone to Singapore
    scheduler = BlockingScheduler(timezone="Asia/Singapore")
    scheduler.add_job(self.wake_up_app, 'cron', hour='5,9', minute=45)
    scheduler.add_job(self.wake_up_app, 'cron', hour='15,18', minute=45)
    scheduler.start()
Example No. 28
class TaskerDo:
    '''
    Schedule template-to-file generation based on redis.
    '''
    ip_list = []
    def __init__(self, conf_file):
        self.get_configuration(conf_file)
        self.client = redis.Redis(**self.config['settings']['redis'])
        self.set_logging(self.config['settings']['logfile'])
        self.sched = BlockingScheduler()
        self.set_scheduler(self.run, self.config['generate']['schedule'])

    def set_scheduler(self, job, schedule, args=None):
        '''
        Set up scheduled jobs.
        :param schedule: dict containing 'name', 'trigger' (cron, interval, or any other
            apscheduler trigger) and 'params' (trigger parameters such as minutes or
            seconds; see the apscheduler docs)
        :return: None
        '''
        self.sched.add_job(job, args=args, trigger=schedule['trigger'], name=schedule['name'], **schedule['params'])


    def start(self):
        self.sched.start()

    def stop(self):
        self.sched.shutdown()

    def run(self):
        '''
        This is where the magic starts and all the jobs are executed:
          optionally load ip_list into redis,
          get ips from redis -> generate file -> run reload commands
        :param ip_list: optional, list of ips to load into redis
        :return: nothing yet
        '''
        if len(self.ip_list) > 0:
            self.redis_push_ip_list(self.ip_list, expiration=self.config['bad_ip_expiration'])
            self.ip_list = []
        ip_list = self.redis_get_ip_list()
        logging.info("%s items in database: %s" % (len(ip_list), ",".join(ip_list)))
        if len(ip_list) > 0:
            generate = self.config['generate']
            status = self.file_generate(template_file=generate['template'],generated_file=generate['file'], ip_list=ip_list)
            if status:
                logging.info("configuration file: %s was created"%(generate['file']))
                for command in self.config['generate']['reload_commands']:
                    logging.info("executing command: %s" % (command))
                    try:
                        if self.reload_services(command):
                            logging.info("Success")
                    except CommandFail as e:
                        self.logger.error(e)
                        break
            else:
                logging.info('reload not needed')

    def is_diff(self, content_str, current_file):
        '''
        Compare new version to existing one
        :param content_str: string, new version content string
        :param current_file: file, existing file
        :return: bool, True if different
        '''
        try:
            with open(current_file) as f:
                current_str = f.read()
        except FileNotFoundError:
            return True
        if content_str == current_str:
            return False
        else:
            return True

    def get_configuration(self, conf_file):
        '''
        :param conf_file: yaml configuration file with report data
        :return: configuration data
        '''
        with open(conf_file) as f:
            yaml_data = f.read()
        self.config = yaml.safe_load(yaml_data)

    def set_logging(self, log_file):
        self.logger = logging.getLogger()
        hdlr = logging.FileHandler(log_file)
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)
        self.logger.setLevel(logging.INFO)

    def file_generate(self, template_file, generated_file, **kwargs):
        '''
        generate application config file using template
        :param template_file: file, the template file to generate using Jinja2 template engine
        :param generated_file: file, the file to generate
        :param kwargs: the argument to send to templates
        :return: bool status
        '''
        with open(template_file) as t:
            template_str = t.read()
        t = Template(template_str)
        content_str = t.render(**kwargs)
        is_diff = self.is_diff(content_str, generated_file)
        if is_diff:
            logging.info("New version exists")
            return self.write_content(content_str, generated_file)
        else:
            logging.info("No changes in current file, generation not needed")
            return False

    def redis_push_ip_list(self, ip_list, expiration):
        '''
        push a list of ips to redis and set their expiration
        :param ip_list: list of strings
        :param expiration: int, seconds
        :return: bool status
        '''
        ip_list = [ip for ip in ip_list if not ipaddress.ip_address(ip).is_private]
        if len(ip_list) == 0:
            raise ValueError('Empty list supplied')
        for key in ip_list:
            _ = self.client.set(key, None, ex=expiration)
        return True

    def redis_get_ip_list(self):
        '''
        get the list of ips from redis
        :return: list
        '''
        ips = self.client.keys('*')
        return [i.decode() for i in ips]

    def write_content(self, content_str, generated_file, backup=True):
        '''
        helper to the file_generate method; writes the data to file and creates a backup file
        :param content_str: string, the generated content
        :param generated_file:
        :param backup: bool
        :return: bool, status
        '''
        if backup and os.path.exists(generated_file):
            date = datetime.now().strftime('%y%m%d%H%M%S')
            os.rename(generated_file, generated_file + '.' + date)
        with open(generated_file, 'w') as conf_file:
            _ = conf_file.write(content_str)
        return True

    def reload_services(self, commands_list):
        '''
        execute a list of shell commands, stopping if one fails
        :param commands_list: list of shell command_str
        :return: bool, status
        '''
        for command_str in commands_list:
            exit_status,output = subprocess.getstatusoutput(command_str)
            if exit_status > 0:
                raise CommandFail("Command %s failed with status %s output: %s" % (command_str, exit_status, output))
        return True
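A sketch of the schedule dict that set_scheduler expects, per its docstring; the name and the trigger parameters are illustrative:

schedule = {
    'name': 'generate-blocklist',
    'trigger': 'interval',
    'params': {'minutes': 5},
}
# self.set_scheduler(self.run, schedule)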
Example No. 29
class Scheduler:
    def __init__(self):
        conf = configparser.ConfigParser()
        conf.read("../agent.ini")
        ip = conf.get("redis", "ip")
        port = conf.getint("redis", "port")
        timeout = conf.getint("redis", "timeout")
        self.invoker_id = self._get_invoker_id()
        self.max_tasks = conf.getint("invoker", "max_tasks")
        self.live_seconds = conf.getint("invoker", "live_seconds")
        self.db = SchedulerDb(ip, port, timeout)
        logging.config.fileConfig("../logger.ini")
        self.logger = logging.getLogger("main")
        executors = {
            'default': {'type': 'processpool', 'max_workers': self.max_tasks + 1}
        }
        self.blockScheduler = BlockingScheduler(executors=executors)
        self.jobs = {}
        self.lock = threading.Lock()

    @staticmethod
    def _get_invoker_id():
        hostname = socket.gethostname()
        pid = os.getpid()
        return hostname + "-" + str(pid)

    def task_invoke(self, task_instance, task_param):
        if task_param.cmd.startswith('http'):
            executor = HttpExecutor(self.db, task_instance, task_param)
            executor.execute()
        else:
            pass

    def break_heart(self):
        """
        Heartbeat that runs periodically: check for newly assigned tasks and
        for tasks that need updating.
        :return:
        """
        # First, restart or shut down schedules whose parameters have changed
        try:
            self.lock.acquire()
            self.refresh_local_invoker()
            self.refresh_other_invokers()
            if len(self.jobs) >= self.max_tasks:
                return

            task_instances, task_params = self.db.query_waiting_run_tasks(self.invoker_id,
                                                                          self.max_tasks - len(self.jobs),
                                                                          True)
            if len(task_instances) == 0:
                return
            for i in range(len(task_instances)):
                task_instance = task_instances[i]
                task_param = task_params[i]
                if task_instance.id not in self.jobs.keys():
                    self.logger.info("New task %s assigned", task_instance.id)
                    job = self.blockScheduler.add_job(self.task_invoke,
                                                      next_run_time=(
                                                          datetime.datetime.now() + datetime.timedelta(seconds=2)),
                                                      args=[task_instance, task_param], id=task_instance.id)
                    self.jobs[job.id] = job
                    self.db.lock_invoker_instance(self.invoker_id, task_instance.id, self.live_seconds)
                else:
                    self.logger.error("Task %s is already running", task_instance.id)
        finally:
            self.lock.release()

    def refresh_local_invoker(self):
        """
        Check whether scheduling parameters have changed; restart the affected
        schedules if needed.
        :return:
        """

        self.db.update_invoker_time(self.invoker_id, self.jobs.keys(), self.live_seconds)
        self.logger.info("Heartbeat for %s updated successfully!", self.invoker_id)
        # Release any tasks assigned to this invoker that were flagged to stop
        stop_tasks = self.db.query_need_stop_tasks(self.invoker_id)
        for stop_task in stop_tasks:
            if stop_task in self.jobs.keys():
                try:
                    job = self.jobs[stop_task]
                    task_instance = job.args[0]
                    task_instance.status = 'off'
                    job.pause()
                    job.remove()
                except Exception as e:
                    self.logger.error(e)
                    self.jobs.pop(stop_task)
                    try:
                        self.blockScheduler.remove_job(stop_task)
                    except Exception as e1:
                        self.logger.error(e1)

            self.logger.info("Task %s was stopped manually", stop_task)
            self.db.unlock_invoker_instance(self.invoker_id, stop_task, self.live_seconds)

        # Restart tasks whose parameters have changed
        c_jobs = copy.copy(self.jobs)
        for key in c_jobs.keys():
            if key not in self.jobs.keys():
                continue
            job = self.jobs[key]
            task_instance = job.args[0]
            old_task_param = job.args[1]
            # Check whether the parameters changed; if so, re-run the task
            new_task_param = self.db.query_task_param(task_instance.task_param_id)
            # if new_task_param
            if not new_task_param.has_diff(old_task_param):
                continue

            try:
                task_instance.status = 'off'
                job.pause()
                job.remove()
            except Exception as e:
                self.logger.error(e)
                self.jobs.pop(key)
                try:
                    self.blockScheduler.remove_job(key)
                except Exception as e1:
                    self.logger.error(e1)
            self.logger.info("Task %s stopped because its parameters changed", task_instance.id)
            self.db.unlock_invoker_instance(self.invoker_id, task_instance.id, self.live_seconds)
            self.db.add_task_waiting_run(task_instance.id)

    def refresh_other_invokers(self):
        """
        遍历所有的invoker,判断invoker是否超过存活期
        :return:
        """
        invokers = self.db.query_all_invokers()
        for invoker_id in invokers.keys():
            if not self.db.invoker_is_live(self.invoker_id):
                task_instance_list = self.db.query_invoker_tasks(self.invoker_id)
                for task_instance_id in task_instance_list:
                    self.db.add_task_waiting_run(task_instance_id)

    def main(self):
        try:
            self.db.register_invoker(self.invoker_id, self.max_tasks, self.live_seconds)
            self.blockScheduler.add_listener(self._job_listener,
                                             events.EVENT_JOB_ERROR | events.EVENT_JOB_MISSED)

            self.blockScheduler.add_job(self.break_heart, "interval", seconds=self.live_seconds / 2,
                                        id="break_heart")
            self.logger.info("开始启动调度...")
            self.blockScheduler.start()
            self.logger.info("启动调度成功!")
        except KeyboardInterrupt as e:
            self.logger.info(e)
            self.blockScheduler.shutdown()

    def _job_listener(self, ev):
        """
        监听job的事件,job完成后再发起下次调用,对于异常也要处理
        :param ev:
        :return:
        """
        if ev.code == events.EVENT_JOB_ERROR:
            self.logger.error(ev.exception)
            self.logger.error(ev.traceback)
        else:
            pass
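
The listener wiring above is APScheduler's standard error-handling hook. A minimal, self-contained sketch of the same pattern (the job and its failure are invented for illustration):

import logging

from apscheduler import events
from apscheduler.schedulers.blocking import BlockingScheduler

logging.basicConfig(level=logging.INFO)
scheduler = BlockingScheduler()

def flaky_job():
    raise RuntimeError("simulated failure")

def on_job_event(ev):
    # A JobExecutionEvent carries the exception and traceback of a failed run.
    if ev.code == events.EVENT_JOB_ERROR:
        logging.error("job %s failed: %s", ev.job_id, ev.exception)

scheduler.add_listener(on_job_event, events.EVENT_JOB_ERROR | events.EVENT_JOB_MISSED)
scheduler.add_job(flaky_job, "interval", seconds=5, id="flaky")
# scheduler.start()  # blocks the calling thread until shutdown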
Exemplo n.º 30
0
Arquivo: base.py Projeto: hejin/st2
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.
    """
    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}
        self._trigger_types = TIMER_TRIGGER_TYPES.keys()
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix=self.__class__.__name__,
                                               exclusive=True)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            LOG.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        del self._jobs[trigger_id]

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        utc_now = date_utils.get_datetime_utc_now()
        if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)
        return time_type

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        utc_now = date_utils.get_datetime_utc_now()
        # debug logging is reasonable for this one. A high resolution timer will end up
        # trashing standard logs.
        LOG.debug('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)

        payload = {
            'executed_at': str(utc_now),
            'schedule': trigger['parameters'].get('time')
        }

        trace_context = TraceContext(trace_tag='%s-%s' % (self._get_trigger_type_name(trigger),
                                                          trigger.get('name', uuid.uuid4().hex)))
        self._trigger_dispatcher.dispatch(trigger, payload, trace_context=trace_context)

    def _get_trigger_type_name(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        return trigger_type['name']

    def _register_timer_trigger_types(self):
        return trigger_services.add_trigger_models(TIMER_TRIGGER_TYPES.values())

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
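
The trigger construction in _add_job_to_scheduler maps each timer type onto an APScheduler trigger class. A hedged sketch of just that mapping, with made-up parameter values:

from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.cron import CronTrigger
from dateutil.parser import parse as date_parse

# st2.IntervalTimer: {'unit': 'seconds', 'delta': 30} -> IntervalTrigger(seconds=30)
interval = IntervalTrigger(**{'seconds': 30})

# st2.DateTimer: a one-shot trigger at a parsed date.
one_shot = DateTrigger(date_parse('2030-01-01T08:00:00'))

# st2.CronTimer: the parameter dict feeds CronTrigger directly.
cron = CronTrigger(**{'hour': 8, 'minute': 30})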
Exemplo n.º 31
0
class AutoCopy(object):

    config_class = Config

    default_config = ImmutableDict({
        'DEBUG': False,
        'DRIVE_LIST': [],
        'DST_PATH': 'C:\\autocopy',
        'EXTENSION': ['.txt'],
        'FILE_SIZE': "10 MB",
        'SECONDS': 2,
        'UNIT': {
            "KB": 1024,
            "MB": 1024 * 1024,
            "GB": 1024 * 1024 * 1024
        }
    })

    def __init__(self, import_name, *args, **kwargs):
        self.import_name = import_name
        self.get_logica_drives = win32file.GetLogicalDrives
        self.config = self.make_config()
        self.scheduler = BlockingScheduler()

    def init(self):
        self.drive_list = self.config.get('DRIVE_LIST')
        self.dst_path = self.config.get('DST_PATH')
        self.unit = self.config.get('UNIT')
        self.size = self.config.get('FILE_SIZE')
        self.extension = self.config.get('EXTENSION')
        self.seconds = self.config.get('SECONDS')
        self.is_dst_path()

    def make_config(self):
        return self.config_class(self.default_config)

    def is_u_disk(self, drive, drive_num=2):
        return win32file.GetDriveType(drive) == drive_num

    def get_u_disk(self, drive_list=None):
        sign = self.get_logica_drives()
        drive_list = drive_list or self.drive_list

        drives = (drive_list[i] for i in range(len(drive_list))
                  if (sign & 1 << i and self.is_u_disk(drive_list[i])))

        return drives

    def is_dst_path(self, dst_path=None):
        dst_path = dst_path or self.dst_path

        if not os.path.exists(dst_path) or not os.path.isdir(dst_path):

            try:
                os.mkdir(dst_path)
            except Exception:
                os.remove(dst_path)
                os.mkdir(dst_path)

        return None

    def _get_size(self, size, unit='KB'):
        units = self.unit

        if isinstance(size, integer_types):
            return size * units.get(unit, 1024)

        try:
            size, unit = size.split(' ')
        except Exception:
            pass
        else:
            return int(size) * units.get(unit, 1024)

        return 1024

    def is_file_size(self, file, size=None):
        if not isinstance(file, basestring):
            raise TypeError("This is not a string.")

        size = size or self.size

        return os.path.getsize(file) < self._get_size(size=size)

    def _copyfile(self, path, dst_path=None, extension=None):

        for path, _, file_list in os.walk(path):
            for file in file_list:
                _, ext = os.path.splitext(file)
                file = os.path.join(path, file)
                if ext in extension and self.is_file_size(file):
                    try:
                        shutil.copy(file, dst_path)
                    except Exception:
                        self.is_dst_path()

    def copyfile(self, dst_path=None, extension=None):
        extension = extension or self.extension
        dst_path = dst_path or self.dst_path
        drives = self.get_u_disk()

        for drive in drives:
            self._copyfile(drive, dst_path, extension)

    def _run(self, *args, **kwargs):
        self.copyfile(*args, **kwargs)

    def run(self, timer=True, seconds=None, *args, **kwargs):
        self.init()

        if not timer:
            return self._run

        trigger = IntervalTrigger(seconds=seconds or self.seconds)
        self.scheduler.add_job(self._run, trigger)

        try:
            self.scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            self.scheduler.shutdown()
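
The 'FILE_SIZE' default of '10 MB' flows through _get_size, which splits the string and multiplies by the unit table. A quick worked check of that arithmetic:

# '10 MB' -> ('10', 'MB') -> 10 * 1024 * 1024 bytes
units = {'KB': 1024, 'MB': 1024 * 1024, 'GB': 1024 * 1024 * 1024}
size, unit = '10 MB'.split(' ')
assert int(size) * units[unit] == 10485760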
Exemplo n.º 32
0
class EventScheduler:
    def __init__(self, reporter: ResultReporter):
        self.reporter = reporter
        self.scheduler = BlockingScheduler()
        self.events = list()
        log_path = static_setting.settings["CaseRunner"].log_path
        log_file = os.path.join(log_path, "event_scheduler_log.log")
        self.log = logger.register("EventScheduler",
                                   filename=log_file,
                                   for_test=True)
        self.scheduler.add_listener(self._event_listen, EVENT_JOB_EXECUTED)

    def add_event(self,
                  event,
                  package,
                  args,
                  is_background,
                  need_lock,
                  start_time,
                  interval=5,
                  loop_count=1,
                  description=""):
        m = importlib.import_module(package)
        event_cls = getattr(m, event)
        new_event = event_cls(description, log=self.log)
        new_event.need_lock = need_lock
        new_event.back_ground = is_background
        new_event.arguments = args
        new_event.interval = interval
        new_event.loop_count = loop_count
        # Create a STEP node in the report for this Event to write to
        new_event.reporter = self.reporter.add_event_group(f"Event: {event}")

        if is_background:
            new_event.job = self.scheduler.add_job(new_event.run,
                                                   "interval",
                                                   seconds=interval,
                                                   start_date=start_time,
                                                   id=f"{event}{uuid.uuid4()}")
        else:
            new_event.job = self.scheduler.add_job(new_event.run,
                                                   "date",
                                                   run_date=start_time,
                                                   id=f"{event}{uuid.uuid4()}")
        self.events.append(new_event)

    def remove_event(self, event_id):
        job = self.scheduler.get_job(event_id)
        if job:
            event_to_remove = None
            for event in self.events:
                if event.job == job:
                    event_to_remove = event
                    self.scheduler.remove_job(event_id)
                    break
            if event_to_remove:
                self.events.remove(event_to_remove)

    def start(self):
        self.scheduler.start()

    def _event_listen(self, ev):
        for event in self.events:
            if event.job.id == ev.job_id:
                if event.back_ground:
                    return
                else:
                    if event.loop_count == 1:
                        return
                    delta = datetime.timedelta(seconds=event.interval)
                    next_date = ev.scheduled_run_time + delta
                    event.job = self.scheduler.add_job(
                        event.run,
                        "date",
                        run_date=next_date,
                        id=f"{event.name}{uuid.uuid4()}")
                    event.loop_count -= 1
                    return
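
Re-adding a 'date' job from a listener, as _event_listen does above, is one way to get a fixed number of repetitions. A stripped-down sketch of the same trick (all names are illustrative):

import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.events import EVENT_JOB_EXECUTED

scheduler = BlockingScheduler()
remaining = {'count': 3}

def task():
    print('run, remaining:', remaining['count'])

def reschedule(event):
    # Schedule the next run relative to the previous scheduled time.
    remaining['count'] -= 1
    if remaining['count'] > 0:
        next_date = event.scheduled_run_time + datetime.timedelta(seconds=5)
        scheduler.add_job(task, 'date', run_date=next_date)

scheduler.add_listener(reschedule, EVENT_JOB_EXECUTED)
scheduler.add_job(task, 'date',
                  run_date=datetime.datetime.now() + datetime.timedelta(seconds=1))
# scheduler.start()  # blocks until interrupted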
Exemplo n.º 33
0
class Feeds(Thread):

    WINDOW = 10_000

    def __init__(self, sources, feeds, sleep, logger):

        Thread.__init__(self)

        self.sleep = sleep
        self.logger = logger

        self.coords = deque([(source.strip(), feed.strip())
                             for source, feed in zip(sources, feeds)])

        self.entries = []
        self.last = {feed: [] for _, feed in self.coords}

        socket.setdefaulttimeout(3)

    def run(self):

        job_defaults = {'coalesce': True, 'max_instances': 1}
        self.blocker = BlockingScheduler(job_defaults=job_defaults)
        self.blocker.add_job(self.parse_feed,
                             'cron',
                             second=f'*/{self.sleep}',
                             id='parse_feed')

        self.blocker.start()

    def on_close(self):

        self.blocker.shutdown()
        self.join()

    def parse_feed(self):

        self.coords.rotate()
        self.source, self.feed = self.coords[0]

        try:
            response = feedparser.parse(self.feed)
        except Exception as e:
            self.logger.warning(f"Status,{self.source},{self.feed},{e}")
            return

        status = response.get('status', None)
        if not status:
            self.logger.warning(f"Status,{self.source},{self.feed},None")
            return

        if status != 200:
            self.logger.warning(f"Status,{self.source},{self.feed},{status}")
            return

        entries = response.get('entries', None)
        if not entries:
            self.logger.warning(f"Entries,{self.source},{self.feed},None")
            return

        for entry in entries:

            _id = entry['id'] if self.source == 'Google' else get_id(
                entry.copy())
            if _id in self.last[self.feed]:
                continue

            self.last[self.feed].append(_id)
            self.last[self.feed] = self.last[self.feed][-self.WINDOW:]

            entry['acquisition_datetime'] = datetime.utcnow().isoformat()[:19]
            entry['feed_source'] = self.source
            entry['_source'] = 'rss'
            entry['_id'] = _id

            print(self.source)
            self.entries.append(entry)

        if len(self.entries) > 0:

            with open(f"{DIR}/news_data/{str(uuid.uuid4())}.json",
                      "w") as file:
                file.write(json.dumps(self.entries))

            self.entries = []
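
The 'cron' trigger with second=f'*/{self.sleep}' fires on wall-clock boundaries within each minute, unlike 'interval', which counts from the start time. A minimal sketch with an arbitrary 10-second period:

from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler(job_defaults={'coalesce': True, 'max_instances': 1})
scheduler.add_job(lambda: print('tick'), 'cron', second='*/10', id='tick')
# scheduler.start()  # fires at :00, :10, :20, ... of every minute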
Exemplo n.º 34
0
class DisseminationPlayer(object):

    MIDNIGHT = datetime.time(0,0,0)

    def __init__(self, top_data_dir, index_file, dir_files_to_parse, files_to_parse, job_func, destination):
        """
            :return:
        """
        self._parser = eumetsat.dmon.parsers.xferlog_parser.XferlogParser(no_gems_header = True)
        self._dir_files = dir_files_to_parse
        self._files = files_to_parse
        self._job_func = job_func
        self._scheduler = BlockingScheduler()

        res = []
        t = ftimer(Indexer.load_index, [top_data_dir, index_file], {}, res)
        print("Read index in %d seconds." % (t))
        self._index = res[0]

        #can now set reference time
        #ref time = now time plus one minute
        self._defer_time = 5 
        self._reference_date = datetime.datetime.now() +  datetime.timedelta(seconds=self._defer_time)

        #destination info (depends on the type of job)
        self._destination = destination


    def add_jobs(self):
        """
          Create the jobs from the reference time
        :return:
        """
        for a_file in self._files:
            f_path = "%s/%s" % (self._dir_files, a_file)
            print("Parsing xferlog file %s" % f_path )
            fd = open(f_path)
            self._parser.set_lines_to_parse(fd)
            for elem in self._parser:
                #print("time = %s, filename = %s\n" % (elem['time'], elem['file']))
                #find file in index
                filepath = self._index.get(elem['file'], None)
                if filepath:
                    #get time difference
                    midnight_date = utc.localize(datetime.datetime.combine(elem['time'].date(), self.MIDNIGHT))
                    #print("midnight date = %s ///// elem[time] = %s" % (midnight_date, elem['time']))
                    time_diff = elem['time'] - midnight_date
                    scheduled_date = self._reference_date + time_diff
                    #create job and schedule it with the time difference added to the starting reference time
                    d_trigger = DateTrigger(scheduled_date)

                    self._scheduler.add_job(self._job_func, d_trigger, args=[filepath, self._destination])
                else:
                    print("Could not find %s\n in Index" % (elem['file']))

        print("Player. %d jobs scheduled.\n" % (len(self._scheduler.get_jobs())))


    def start(self):
        """
        :return:
        """
        self._scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

        print("Start Scheduler. Jobs will start to be played in %d sec." % self._defer_time)
        self._scheduler.start()
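
Each xferlog entry above is replayed at reference_date plus the entry's offset from that day's midnight. A compact, self-contained illustration of the offset computation (timestamps invented):

import datetime

MIDNIGHT = datetime.time(0, 0, 0)
event_time = datetime.datetime(2015, 3, 2, 9, 15, 30)  # historical event at 09:15:30
midnight = datetime.datetime.combine(event_time.date(), MIDNIGHT)
offset = event_time - midnight                         # 9h 15m 30s into the day

reference = datetime.datetime.now() + datetime.timedelta(seconds=5)
scheduled = reference + offset  # replay lands 9h15m30s after the reference time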
Exemplo n.º 35
0
class TaskExecutor:
    def __init__(self, db, task_instance, task_param):
        self.task_instance = task_instance
        self.task_param = task_param
        self.db = db
        # invoke log
        self.invoke_log_map = {}
        self.jobs = {}
        logging.config.fileConfig("../logger.ini")
        self.logger = logging.getLogger("taskExecutor")
        invoke_count = int(self.task_param.get_invoke_args()['invoke_count'])
        executors = {
            'default': {
                'type': 'threadpool',
                'max_workers': invoke_count + 1
            }
        }
        self.scheduler = BlockingScheduler(executors=executors)

    def execute(self):
        self.scheduler.add_listener(
            self._job_listener,
            events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR
            | events.EVENT_JOB_ADDED | events.EVENT_JOB_MISSED)

        # periodically upload invoke_log_map to the server
        self.scheduler.add_job(self._invoke_break_heart, "interval", seconds=2)
        try:
            self.scheduler.start()
        except Exception as e:
            print(e)
            self.scheduler.shutdown(wait=True)

    def _job_listener(self, ev):
        """
        监听job的事件,job完成后再发起下次调用,对于异常也要处理
        :param ev:
        :return:
        """
        if self.task_instance.status == 'off':
            return
        if ev.code == events.EVENT_JOB_ADDED:
            self.jobs[ev.job_id] = self.scheduler.get_job(ev.job_id)
        elif ev.code == events.EVENT_JOB_EXECUTED or ev.code == events.EVENT_JOB_ERROR:
            if ev.code == events.EVENT_JOB_ERROR:
                self.logger.error(ev.exception)
                self.logger.error(ev.traceback)
            job = self.jobs[ev.job_id]
            self.scheduler.add_job(
                job.func,
                next_run_time=(datetime.datetime.now() +
                               datetime.timedelta(seconds=1)),
                id=ev.job_id,
                args=job.args)
        else:
            pass

    def _invoke_break_heart(self):
        if self.task_instance.status == 'off':
            jobs = self.scheduler.get_jobs()
            for job in jobs:
                try:
                    job.pause()
                    job.remove()
                except Exception as e:
                    self.logger.error(e)
        self.db.save_task_logs(self.invoke_log_map)
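
The executors block in __init__ sizes the default thread pool to the planned concurrency plus one, so the heartbeat job never starves. A minimal sketch of that configuration (the count is illustrative):

from apscheduler.schedulers.blocking import BlockingScheduler

invoke_count = 4  # assumed number of parallel task invocations
executors = {
    'default': {'type': 'threadpool', 'max_workers': invoke_count + 1}
}
scheduler = BlockingScheduler(executors=executors)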
Exemplo n.º 36
0
def run_clock():
    scheduler = BlockingScheduler()
    start_time = datetime.now(pytz.timezone('US/Eastern')).replace(hour=6, minute=30, second=0)
    scheduler.add_job(tick, 'interval', start_date=start_time, seconds=86400)
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    scheduler.start()
Exemplo n.º 37
0
 def handle(self, *args, **kwargs):
     self.stdout.write(self.style.NOTICE('Preparing scheduler'))
     scheduler = BlockingScheduler(timezone=UTC)
     scheduler.add_job(periodically_run_job, 'interval', minutes=1)
     self.stdout.write(self.style.NOTICE('Start scheduler'))
     scheduler.start()
Exemplo n.º 38
0
class Scheduler:
    def __init__(self, main_config):
        self.main_config = main_config
        self.create_dirs()
        self.logger = get_logger(main_config['project_name'],
                                 file=main_config['logs_dir'],
                                 level=main_config['log_level'])
        self.board = None
        self.scheduler = BlockingScheduler()
        self.setup()
        atexit.register(self._exit)

    def create_dirs(self):
        try:
            Path(self.main_config['logs_dir']).mkdir(parents=True,
                                                     exist_ok=True)
            Path(self.main_config['data_dir']).mkdir(parents=True,
                                                     exist_ok=True)

            Path(self.main_config['data_dir']).joinpath('sensors/').mkdir(
                parents=True, exist_ok=True)

            cameras_config = read_json(self.main_config['cameras_config'])
            for camera in cameras_config:
                Path(self.main_config['data_dir'])\
                    .joinpath('images/' + str(camera['type'] + '_' + str(camera['id'])))\
                    .mkdir(parents=True, exist_ok=True)
        except Exception as e:
            self.logger.error('Error creating file structure!')
            self.logger.error(str(e))

    def setup(self):
        try:
            board_scheme = read_json(self.main_config['board'])
            sensors = read_json(self.main_config['sensors_config'])
            board = Board(board_scheme, sensors, self.logger)
            self.board = board
        except Exception as e:
            self.logger.warning(
                'No board specified in config or some error in Board init')
            self.logger.warning(str(e))
            raise UserWarning(str(e))

        for p in self.main_config['pipelines']:
            pipeline = read_json(p)
            pipeline_executor = PipelineExecutor(
                logger=get_logger(self.main_config['project_name'] + '.' +
                                  pipeline['name'],
                                  file=self.main_config['logs_dir'],
                                  level=self.main_config['log_level']),
                pipeline=pipeline['pipeline'],
                main_config=self.main_config,
                pipeline_name=pipeline['name'],
                board=self.board)
            self.scheduler.add_job(
                func=(lambda executor=pipeline_executor: executor.execute()),
                **pipeline['run_interval'])

    def start(self):
        try:
            self.logger.info(self.main_config['project_name'] + ' started')
            self.scheduler.start()
        except Exception as e:
            self.logger.error('Error starting scheduler!')
            self.logger.error(str(e))

    def _exit(self):
        self.board.exit()
        print('EXITING!!!')
        self.logger.info('System exited normally')
        self.scheduler.shutdown()
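
The default-argument lambda in setup (lambda executor=pipeline_executor: ...) pins each job to its own executor; a bare lambda would close over the loop variable and every job would run the last pipeline. A tiny demonstration of the difference:

funcs_late = [lambda: i for i in range(3)]
funcs_bound = [lambda i=i: i for i in range(3)]

assert [f() for f in funcs_late] == [2, 2, 2]   # late binding: all see the final i
assert [f() for f in funcs_bound] == [0, 1, 2]  # default argument captures each i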
Exemplo n.º 39
0
Arquivo: base.py Projeto: timff/st2
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.
    """
    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}
        self._trigger_types = TIMER_TRIGGER_TYPES.keys()
        self._trigger_watcher = TriggerWatcher(
            create_handler=self._handle_create_trigger,
            update_handler=self._handle_update_trigger,
            delete_handler=self._handle_delete_trigger,
            trigger_types=self._trigger_types,
            queue_suffix='timers')
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        id = trigger['id']

        try:
            job_id = self._jobs[id]
        except KeyError:
            LOG.info('Job not found: %s', id)
            return

        self._scheduler.remove_job(job_id)

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'],
                      e,
                      exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        if hasattr(time_type,
                   'run_date') and datetime.now(tzutc()) > time_type.run_date:
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'],
                      e,
                      exc_info=True)

    def _emit_trigger_instance(self, trigger):
        LOG.info('Timer fired at: %s. Trigger: %s', str(datetime.utcnow()),
                 trigger)

        payload = {
            'executed_at': str(datetime.utcnow()),
            'schedule': trigger['parameters'].get('time')
        }
        self._trigger_dispatcher.dispatch(trigger, payload)

    def _register_timer_trigger_types(self):
        return container_utils.add_trigger_models(TIMER_TRIGGER_TYPES.values())

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
Exemplo n.º 40
0
observer.on('on_server_response', on_server_response)
socketIO.wait(seconds=1)


def scheduledAdvertiserScan():
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    json_data = {
        'observer_id': OBSERVER_ID,
        'observer_location': OBSERVER_LOCATION,
        'advertiser_id': 'bbbbbbb',
        'rssi': -58,
        'data': '1234567c3'
    }
    print "Device ID: %s, RSSI: %d dB" % (json_data['advertiser_id'],
                                          json_data['rssi'])
    print "BlueHat Data: %s" % json_data['data']
    observer.emit('observer_json_msg', json_data)
    socketIO.wait(seconds=0.1)
    print('\n')


if __name__ == "__main__":
    print('Starting Observer Scan Scheduler!\n')
    scheduler = BlockingScheduler()
    scheduler.add_job(scheduledAdvertiserScan,
                      'interval',
                      seconds=3,
                      id='advertiserScan',
                      misfire_grace_time=1)
    scheduler.start()
Exemplo n.º 41
0
def start_scheduled_job():
    sched = BlockingScheduler()
    sched.add_job(main, 'interval',
                  seconds=working_interval_sec, id="real_time_1")
    sched.start()
Exemplo n.º 42
0
    if len(my_accounts) == 0:
        brain_key = rpc.suggest_brain_key()
        account_registered, account_registration_response = register_account_faucet(config.account, brain_key['pub_key'])
        if account_registered:
            rpc.import_key(config.account, brain_key['wif_priv_key'])

            print("Account: %s succesfully registered" % config.account)
            print(rpc.list_my_accounts())

            print("Brain key: %s" % brain_key['brain_priv_key'])
            print("Write it down/back it up ^")

            print("Send funds to %s and start the bot again" % config.account)
        else:
            print("Account creation failed")
            print(brain_key)
            print(config.faucet + " response: ", account_registration_response)

    else:
        print(my_accounts)
        print(config.account)
        print(rpc.list_account_balances(config.account))
        print("Bot config: " + str(config.bots["MakerRexp"]))
        
        bot.init(config)
 
        run_bot() # running the bot before the scheduler, otherwise it will run for the first time after config.interval
        scheduler = BlockingScheduler()
        scheduler.add_job(run_bot, 'interval', hours=config.interval)
        scheduler.start()
Exemplo n.º 43
0
class XcxScrapy:
    def __init__(self):
        KEY1 = 'UwVrGX4x2r+Pk7bf1aItja=='
        self.token = '4ac1c0259b27f13dfb78c2959da3bf4e'
        self.pc = prpcrypt(b(KEY1))  # initialize the cipher key
        self.info_log = get_logger('logs/info.log')
        self.db = self.connect_db()
        # Count the diseases still waiting to be scraped
        self.max_len = self.db.disease.count_documents({ 'finished': 0 })
        self.count = 0
        print('Number of remaining diseases: {}'.format(self.max_len))
        self.info_log.warning('Number of remaining diseases: {}'.format(self.max_len))
        if self.max_len > 0:
            print('Task started.')
            print('-' * 50)
            self.info_log.warning('Task started.....')
            # Scrape on a fixed schedule
            self.scheduler = BlockingScheduler()
            self.scheduler.add_job(self.request_data, 'interval', id='main_schedule', seconds=120, args=[self])
            self.scheduler.start()
        # self.init_database(self)
        # self.request_data(self)

    # Initialize the database
    @staticmethod
    def init_database(self):
        print('Initial database started!')
        # Initialize the disease table
        disease_file = open('./disease.txt', 'r', encoding='UTF-8')
        try:
            for line in disease_file:
                tmp_line = line.strip().strip('\n')
                self.db.disease.insert_one({
                    'name': tmp_line,
                    'reply': '',
                    'finished': 0
                })
                print('Initial disease: ', tmp_line)
        finally:
            print('Initial database finished!')
            disease_file.close()

    @staticmethod
    def connect_db():
        instance = pymongo.MongoClient('127.0.0.1', 27017)
        db = instance.hebaochacha
        return db
    
    @staticmethod
    def request_data(self):
        # Look up the next disease to scrape
        cur_disease = self.db.disease.find_one({ 'finished': 0 }, skip=self.count)
        question = cur_disease['name']
        print('Start to scrapy: {} ...'.format(question))
        self.info_log.critical('Start to scrapy: {} ...'.format(question))
        res = main(question, self.token)
        print('Response: {}'.format(json.dumps(res, ensure_ascii=False, indent=2)))
        self.info_log.critical('Response: {}'.format(json.dumps(res, ensure_ascii=False, indent=2)))
        if not res: return False
        if res.get('isSuccess'):
            result = res.get('result', {})
            iv = result.get('iv', '')
            content = result.get('content', '')
            if iv and content:
                answer = self.pc.decrypt(b(content), b(iv))
                answer = str(answer, encoding="utf-8")
                if answer:
                    # print(json.dumps(json.loads(str(answer, encoding="utf-8")), ensure_ascii=False, indent=2))
                    answer_re = re.compile('''"content":"(.*?)"''')
                    img_re = re.compile('''"resource_url":"(.*?)"''')
                    answer_list = answer_re.findall(''.join(answer.split()))
                    an = '\n'.join(answer_list)
                    img_list = img_re.findall(''.join(answer.split()))
                    im = '\n'.join(img_list)
                    self.db.disease.update_one({ 'name': question }, { '$set': { 'reply': an, 'images': im, 'finished': 1 } })
                    print('Save data to db: {}'.format({ 'name': question, 'reply': an, 'images': im, 'finished': 1 }))
                    self.info_log.critical('Save data to db: {}'.format({ 'name': question, 'reply': an, 'images': im, 'finished': 1  }))
                    self.count = self.count + 1
                    return True
                else:
                    print('Answer is empty.')
                    self.info_log.warning('Answer is empty.')
                    self.db.disease.update_one({ 'name': question }, { '$set': { 'reply': '', 'images': '', 'finished': 1 } })
                    self.count = self.count + 1
                    return False
            else:
                print('NO iv or content --- {}.'.format(question))
                self.info_log.warning('NO iv or content --- {}.'.format(question))
                self.db.disease.update_one({ 'name': question }, { '$set': { 'reply': '', 'images': '', 'finished': 1 } })
                self.count = self.count + 1
                return False
        else:
            if res.get('errorMsg') == 'token已过期':  # server reports the token has expired
                print('Token is invalid, please log in again.')
                self.info_log.warning('Token is invalid, please log in again.')
                # Terminate the process
                os._exit(0)
            else:
                self.count = self.count + 1
                return False