Example #1
def main(*args, **kwargs):
    config_filename = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_CONFIG_FILENAME
    config = DEFAULT_CONFIG.copy()
    miners = []

    try:
        with open(config_filename) as config_fh:
            config_file = yaml.safe_load(config_fh)  # safe_load avoids constructing arbitrary objects
        config.update(config_file['defaults'])
        miners.extend(config_file['miners'])
    except FileNotFoundError:
        print('Config file \'{}\' was not found.'.format(config_filename))
        exit(1)
    except KeyError as e:
        print('Config did not contain section {}.'.format(e))
        exit(1)

    # print(config)
    # print(miners)

    scheduler = BlockingScheduler(job_defaults={'coalesce': True})
    scheduler.add_listener(listener, EVENT_JOB_ERROR)

    for idx, miner in enumerate(miners):
        schedules = miner.pop('schedule', [])
        device = Antminer(**miner)
        job_config = merge_dicts(config, {'jobs': [], 'idx': idx})
        job = scheduler.add_job(throttle, 'interval', args=(device,), kwargs=job_config,
                                misfire_grace_time=30, seconds=config['refresh_time'],
                                next_run_time=datetime.datetime.now() + datetime.timedelta(seconds=idx * 0.2))
        job_config['jobs'].append(job)
        for schedule in schedules:
            print(schedule)
            trigger_args = {k: schedule.pop(k) for k in schedule.copy() if
                            k in ['year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second',
                                  'start_date', 'end_date']}
            print(trigger_args)
            job = scheduler.add_job(do_thing, 'cron', args=(device, schedule['command'], schedule['value'],),
                                    kwargs=job_config, **trigger_args)
            job_config['jobs'].append(job)


    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
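
Example #1 references a `listener` callback, a `merge_dicts` helper, `throttle`, `do_thing` and an `Antminer` class that are defined elsewhere in the original file. A minimal sketch of the two generic helpers, as they might plausibly look (assumed implementations, not the original code):

def listener(event):
    # Log any exception raised by a scheduled job run.
    print('Job {} raised: {}'.format(event.job_id, event.exception))


def merge_dicts(*dicts):
    # Shallow-merge dictionaries left to right; later values win.
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged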
Example #2
def update_six_month_cases():
    os.system('python update_cases.py 6m')
    sendmail(['*****@*****.**'], 'Cv test case update(6m)', 'complete time: %s' % datetime.datetime.now())


def cal_and_report_of_one_week():
    os.system('python cal_ratio.py 1w')


def cal_and_report_of_six_month():
    os.system('python cal_ratio.py 6m')


if __name__ == '__main__':
    sched = BlockingScheduler()

    # Update the test cases
    update_one_week_job = sched.add_job(update_one_week_cases, 'cron', day='*/2', hour='15', minute='00')
    update_six_month_job = sched.add_job(update_six_month_cases, 'cron', day='*/2', hour='19', minute='00')

    # Calculate crawl coverage and send an email notification
    cal_report_one_week_job = sched.add_job(cal_and_report_of_one_week, 'cron', hour='15', minute='00')
    cal_report_six_month_job = sched.add_job(cal_and_report_of_six_month, 'cron', hour='15', minute='05')

    sched.add_listener(my_listener, EVENT_JOB_ERROR|EVENT_JOB_EXECUTED)
    sched.start()
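
The `update_one_week_cases`, `sendmail` and `my_listener` names used above are not part of this excerpt. A minimal listener compatible with the `EVENT_JOB_ERROR|EVENT_JOB_EXECUTED` mask registered above might look like this sketch (assumed, not the original code):

def my_listener(event):
    if event.exception:
        print('Job {} failed: {}'.format(event.job_id, event.exception))
    else:
        print('Job {} finished (scheduled for {})'.format(event.job_id, event.scheduled_run_time))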



                        print("找不到第{}页".format(list_number))
                        continue
                except Exception as e:
                    print(e)
                    continue
            print("全部完成,共插入{}条数据\n".format(count))

        except Exception as e:
            print(e)


if __name__ == "__main__":
    restart = 0
    scheduler = BlockingScheduler()
    scheduler.add_job(CMCS().run, 'interval', seconds=2)

    def my_listener(event):
        global restart
        restart += 1
        print(restart)
        if restart == 3:
            print("CMCS爬虫重启次数超过2次,停止!")
            os._exit(0)

        print("CMCS爬虫运行超时!重启!")
        scheduler.remove_all_jobs()
        scheduler.add_job(CMCS().run, 'interval', seconds=3600)

    scheduler.add_listener(my_listener, EVENT_JOB_MAX_INSTANCES)
    scheduler.start()
Example #4
@sched.scheduled_job(CronTrigger.from_crontab("*/30 * * * *"))  # every 30 min
def post_to_facebook():
    "Find a valid request and post it to Facebook."
    for identifier in ("en", "es", "pt", "main"):
        _post_to_facebook(identifier)


@sched.scheduled_job(CronTrigger.from_crontab("0 * * * *"))  # every hour
def register_media():
    "Register new media in the database."
    for media in (MediaRegister, EpisodeRegister):
        handler = media(only_w_subtitles=False)

        try:
            handler.load_new_and_deleted()
            handler.handle()
        except Exception as error:
            logger.debug("%s raised for %s. Ignoring", error, media)
            continue


def error_listener(event):
    exception = event.exception

    if not isinstance(exception, KinoException):
        handle_general_exception(exception)


sched.add_listener(error_listener, EVENT_JOB_ERROR)
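
The decorator form above assumes a module-level `sched` object that already exists when the decorated functions are defined. Under that assumption, the surrounding setup might look roughly like this (a sketch, not the original module):

from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()

# ... the decorated job functions and error_listener are defined here ...

if __name__ == "__main__":
    sched.start()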
Example #5
        validate block, and save if valid
    '''
    possible_block = Block(possible_block_dict)
    if possible_block.is_valid():
        # this means someone else won
        possible_block.self_save()
        # kill and restart mining block so it knows it lost
        try:
            sched.remove_job('mining')
            print('removed running mine job in validating possible block')
        except apscheduler.jobstores.base.JobLookupError:
            print("mining job didn't exist when validating possible block")
        print('re-adding mine for block validating_possible_block')
        sched.add_job(mine_for_block,
                      kwargs={
                          'rounds': STANDARD_ROUNDS,
                          'start_nonce': 0
                      },
                      id='mining')
        return True
    return False


# pylint: disable-msg = C0103
if __name__ == '__main__':
    kwargs = {'rounds': STANDARD_ROUNDS, 'start_nonce': 0}
    sched.add_job(mine_for_block, kwargs=kwargs, id='mining')
    sched.add_listener(mine_for_block_listener,
                       apscheduler.events.EVENT_JOB_EXECUTED)
    sched.start()
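
The `mine_for_block_listener` registered for EVENT_JOB_EXECUTED is not shown in this excerpt. In this pattern the listener usually re-queues the mining job once a round of hashing finishes; a sketch under that assumption (not the original code):

def mine_for_block_listener(event):
    # Re-add the mining job for the next batch of rounds once the previous run completes.
    if event.job_id == 'mining':
        sched.add_job(mine_for_block,
                      kwargs={'rounds': STANDARD_ROUNDS, 'start_nonce': 0},
                      id='mining')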
Example #6
class Heating(object):
  def __init__(self):
    with open('config.json') as json_data:
      self.config = json.load(json_data)
      json_data.close()
    logger.debug('Configuration: ' + str(self.config))

    self.processing_lock = threading.Lock()
    self.calendar_lock = threading.Lock()
    self.relay_lock = threading.Lock()

    self.heating_trigger = None
    self.preheat_trigger = None
    self.event_trigger = None
    #Sensible defaults
    self.events = None
    self.desired_temp = self.config['heating_settings']['minimum_temperature']
    self.current_temp = None
    self.proportional_time = 0
    self.time_on = None
    self.time_off = None
    self.event_sync_id = None

    self.relays = None
    self.relays_heating = None
    self.relays_preheat = None

    self.http_server = None
    self.temp_sensors = {}
    self.sched = None

    self.outside_temp = None
    self.outside_apparent_temp = None

  def start(self):
    logger.info('Starting')
    self.credentials = self.get_credentials()

    self.darksky_details = self.get_darksky_details()

    logger.debug('Setting up scheduler and error handler')
    self.sched = BlockingScheduler()
    self.sched.add_listener(self.scheduler_listener, EVENT_JOB_ERROR)

    logger.debug('Searching for temperature sensors')
    try:
      self.find_temp_sensors()
    except NoTagsFoundException as e:
      pass

    logger.debug('Searching for relay')
    #self.relay = BTRelay.find_relay()
    #self.relay = USBRelay.find_relay()
    self.relays = USBMultipleRelays.find_relays()
    for relay in self.relays._relays:
      if relay.port_numbers == tuple(self.config['relays']['heating']):
        self.relays_heating = relay
        logger.info('Found heating relay at ' + str(relay.port_numbers))
      elif relay.port_numbers == tuple(self.config['relays']['preheat']):
        self.relays_preheat = relay
        logger.info('Found preheat relay at ' + str(relay.port_numbers))
    if self.relays_heating is None:
      raise Exception('No heating relay found')

    logger.debug('Creating scheduler jobs')
    #Get new events every X minutes
    self.sched.add_job(self.get_next_event, trigger = 'cron', \
        next_run_time = pytz.utc.localize(datetime.datetime.utcnow()), hour = '*/' + str(self.config['calendar_settings']['update_calendar_interval_hours']), minute = 0)

    self.sched.add_job(self.update_outside_temperature, trigger = 'cron', \
        next_run_time = pytz.utc.localize(datetime.datetime.utcnow()), hour = '*', minute = '*/15')

    #Scan for new devices every minute
    self.sched.add_job(self.find_temp_sensors, trigger = 'cron', \
        next_run_time = pytz.utc.localize(datetime.datetime.utcnow()), hour = '*', minute = '*')

    HttpHandler.heating = self
    logger.debug('Starting HTTP server')
    self.http_server = ThreadedHTTPServer(('localhost', 8080), HttpHandler)
    http_server_thread = threading.Thread(target=self.http_server.serve_forever)
    http_server_thread.daemon = True # don't hang on exit
    http_server_thread.start()

    logger.debug('Starting scheduler')
    try:
      self.sched.start()
    except Exception as e:
      logger.error('Error in scheduler: ' + str(e))
      self.http_server.shutdown()
      self.sched.shutdown(wait = False)

  def scheduler_listener(self, event):
    if event.exception is not None or event.code == EVENT_JOB_MAX_INSTANCES:
      logger.error('Error in scheduled event: ' + str(event))
      logger.debug(type(event.exception))
      if not isinstance(event.exception, NoTemperatureException) and not isinstance(event.exception, NoTagsFoundException):
        logger.error('Killing all the things')
        raise Exception(str(event))
        #self.http_server.shutdown()
        #self.sched.shutdown(wait = False)
        #exit(1)

  def find_temp_sensors(self):
    self.temp_sensors = TempSensor.find_temp_sensors(self.temp_sensors)
    for sensor in list(self.temp_sensors.values()):
      if sensor.temp_job_id is None:
        logger.info('Setting scheduler job for ' + sensor.mac)
        #Get a new temperature every minute
        sensor.temp_job_id = self.sched.add_job(self.get_temperature, trigger = 'interval', \
          start_date = datetime.datetime.now(), seconds = self.config['heating_settings']['update_temperature_interval_seconds'], \
          name = sensor.mac + ' temperature job', args = (sensor,))

  def heating_on(self, proportion):
    self.time_on = pytz.utc.localize(datetime.datetime.utcnow())
    self.time_off = None
    self.proportional_time = proportion
    logger.debug('Getting relay lock')
    self.relay_lock.acquire()
    logger.debug('Got relay lock')
    self.relays_heating.on()
    logger.debug('Releasing relay lock')
    self.relay_lock.release()
    self.set_heating_trigger(proportion, True)

  def heating_off(self, proportion):
    self.time_off = pytz.utc.localize(datetime.datetime.utcnow())
    self.time_on = None
    logger.debug('Getting relay lock')
    self.relay_lock.acquire()
    logger.debug('Got relay lock')
    self.relays_heating.off()
    logger.debug('Releasing relay lock')
    self.relay_lock.release()
    self.set_heating_trigger(proportion, False)

  def preheat_on(self, time_off):
    logger.debug('Getting relay lock')
    self.relay_lock.acquire()
    logger.debug('Got relay lock')
    self.relays_preheat.on()
    logger.debug('Releasing relay lock')
    self.relay_lock.release()
    self.set_preheat_trigger(time_off)

  def preheat_off(self):
    logger.debug('Getting relay lock')
    self.relay_lock.acquire()
    logger.debug('Got relay lock')
    self.relays_preheat.off()
    logger.debug('Releasing relay lock')
    self.relay_lock.release()

  def check_relay_states(self):
    logger.debug('Checking states ' + str(self.relays.all_status()))
    for idx, s in enumerate(self.relays.all_status(), start=1):
      if s == 0:
        self.relays.one_off(idx)
      else:
        self.relays.one_on(idx)

  def set_heating_trigger(self, proportion, on):
    self.proportional_time = proportion
    if self.heating_trigger is not None:
      try:
        self.heating_trigger.remove()
      except JobLookupError as e:
        pass
      self.heating_trigger = None

    if on:
      if proportion < self.config['heating_settings']['proportional_heating_interval_minutes']:
        run_date = self.time_on + datetime.timedelta(0,self.proportional_time * 60)
        logger.info('New proportional time: ' + str(proportion) + '/' + str(self.config['heating_settings']['proportional_heating_interval_minutes']) +\
          ' mins - will turn off at ' + str(run_date.astimezone(get_localzone())))
        self.heating_trigger = self.sched.add_job(\
          self.process, trigger='date', run_date=run_date, name='Proportional off at ' + str(run_date.astimezone(get_localzone())))
    else:
      if proportion > 0:
        if self.time_off is None:
          self.time_off = pytz.utc.localize(datetime.datetime.utcnow())
        run_date = self.time_off + datetime.timedelta(0,(self.config['heating_settings']['proportional_heating_interval_minutes'] - self.proportional_time) * 60)
        logger.info('New proportional time: ' + str(proportion) + '/' + str(self.config['heating_settings']['proportional_heating_interval_minutes']) +\
          ' mins - will turn on at ' + str(run_date.astimezone(get_localzone())))
        self.heating_trigger = self.sched.add_job(\
          self.process, trigger='date', run_date=run_date, name='Proportional on at ' + str(run_date.astimezone(get_localzone())))

  def set_preheat_trigger(self, time_off):
    if self.preheat_trigger is not None:
      try:
        self.preheat_trigger.remove()
      except JobLookupError as e:
        pass
      self.preheat_trigger = None
    logger.info('Preheat off at ' + str(time_off.astimezone(get_localzone())))
    self.preheat_trigger = self.sched.add_job(\
      self.process, trigger='date', run_date=time_off, name='Preheat off at ' + str(time_off.astimezone(get_localzone())))


  def get_temperature(self, sensor):
    try:
      sensor.get_ambient_temp()
    except NoTemperatureException as e:
      logger.warning('Removing sensor ' + sensor.mac + ' from sensors list due to disconnection')
      try:
        sensor.temp_job_id.remove()
      except JobLookupError as e:
        pass
      del self.temp_sensors[sensor.mac]

    self.update_current_temp()

  def update_current_temp(self):
    temps = []
    for mac, sensor in self.temp_sensors.items():
      if sensor.amb_temp is not None:
        temps.append(sensor.amb_temp)

    if not temps:
      raise NoTemperatureException()
    #self.current_temp = sum(temps) / float(len(temps))
    self.current_temp = min(temps)
    logger.info('Overall temperature is now ' + str(self.current_temp) + ' from ' + str(temps))

    self.process()

  def get_next_event(self):
    self.calendar_lock.acquire()
    http = self.credentials.authorize(httplib2.Http(timeout=self.config['calendar_settings']['calendar_timeout_seconds']))
    service = discovery.build('calendar', 'v3', http=http)

    now = datetime.datetime.utcnow().isoformat() + 'Z'
    logger.debug('Getting the next event')
    try:
      eventsResult = service.events().list(
        calendarId=self.config['calendar_settings']['calendar_id'], timeMin=now, maxResults=3, singleEvents=True, orderBy='startTime').execute()
      events = eventsResult.get('items', [])
      self.event_sync_id = str(uuid.uuid4())
      logger.debug('Sending request: ' + str({'id':self.event_sync_id, \
              'type':'web_hook', \
              'address':'https://www.steev.me.uk/heating/events', \
              'expiration':(int(time.time())+(self.config['calendar_settings']['update_calendar_interval_hours']*60*60))*1000 \
             }))
      hook_response = service.events().watch(calendarId=self.config['calendar_settings']['calendar_id'], \
        body={'id':self.event_sync_id, \
              'type':'web_hook', \
              'address':'https://www.steev.me.uk/heating/events', \
              'expiration':(int(time.time())+(self.config['calendar_settings']['update_calendar_interval_hours']*60*60))*1000 \
             })\
        .execute()
      if hook_response is not None:
        logger.debug('Got response' + str(hook_response) + ' from web_hook call')
    except HttpError as e:
      logger.error('HttpError, resp = ' + str(e.resp) + '; content = ' + str(e.content))
      logger.exception(e)
      self.calendar_lock.release()
      return
    except Exception as e:
      logger.exception(e)
      self.calendar_lock.release()
      return

    parsed_events = []
    if events:
      counter = 0
      for event in events:
        counter += 1
        start = event['start'].get('dateTime', event['start'].get('date'))
        start_date = parser.parse(start)
        end = event['end'].get('dateTime', event['end'].get('date'))
        end_date = parser.parse(end)

        try:
          desired_temp = float(event['summary'])
        except ValueError:
          if event['summary'].lower() == 'on':
            desired_temp = 'On'
          elif event['summary'].lower() == 'preheat':
            desired_temp = 'Preheat'
          else:
            #Unrecognised summary - fall back to the minimum temperature
            desired_temp = self.config['heating_settings']['minimum_temperature']

        logger.info('Event ' + str(counter) + ' is ' + str(start_date.astimezone(get_localzone())) + \
          ' to ' + str(end_date.astimezone(get_localzone())) + ': ' + str(desired_temp))
        parsed_events.append({'start_date': start_date, 'end_date': end_date, 'desired_temp': desired_temp})
        if counter == 1:
          #Set a schedule to get the one after this
          if self.event_trigger is not None:
            try:
              self.event_trigger.remove()
            except JobLookupError as e:
              pass
            self.event_trigger = None

          self.event_trigger = self.sched.add_job(self.get_next_event, \
            trigger='date', run_date=end_date, name='Event end at ' + str(end_date.astimezone(get_localzone())))

          #Tell the processing that this is a new event so it resets the proportion to start again
          if self.events is None or start_date != self.events[0]['start_date'] or end_date != self.events[0]['end_date'] or desired_temp != self.events[0]['desired_temp']:
            logger.info('New event starting, resetting time off.')
            self.time_off = None

      self.events = parsed_events
    else:
      self.events = None

    self.calendar_lock.release()
    self.process()

  def update_outside_temperature(self):
    try:
      logger.info('Getting new outside temperature')
      with urllib.request.urlopen('https://api.darksky.net/forecast/' + self.darksky_details['api_key'] + '/' + self.darksky_details['latlong'] + '?exclude=[minutely,hourly,daily]&units=si') as darksky_url:
        data = json.loads(darksky_url.read().decode())
      logger.debug(str(data))

      if data['currently']:
        if data['currently']['apparentTemperature']:
          self.outside_apparent_temp = data['currently']['apparentTemperature']
          logger.info('Got outside apparent temperature: ' + str(self.outside_apparent_temp))
        if data['currently']['temperature']:
          self.outside_temp = data['currently']['temperature']
          logger.info('Got outside temperature: ' + str(self.outside_temp))
    except Exception as e:
      logger.warning('Could not update outside temperature: ' + str(e))

  def process(self):
    logger.debug('Processing')
    #Main calculations. Figure out whether the heating needs to be on or not.
    if self.current_temp is None:
      return

    self.processing_lock.acquire()

    current_time = pytz.utc.localize(datetime.datetime.utcnow())
    current_temp = self.current_temp
    time_due_on  = None
    have_temp_event = False
    forced_on = False
    have_preheat = False

    if current_temp < self.config['heating_settings']['minimum_temperature']:
      #If we're below the minimum allowed temperature, turn on at full blast.
      logger.info('Temperature is below minimum, turning on')
      self.desired_temp = str(self.config['heating_settings']['minimum_temperature'])
      self.heating_on(self.config['heating_settings']['proportional_heating_interval_minutes'])

    elif self.events is not None:
      #Find preheat events
      index = -1
      while index < 3:
        index += 1

        if index >= len(self.events):
          break

        if self.events[index]['desired_temp'] == 'Preheat':
          if self.events[index]['start_date'] < current_time and not self.events[index]['end_date'] < current_time:
            have_preheat = True
            if not(self.relays_preheat._status):
              logger.info('Preheat on')
              self.preheat_on(self.events[index]['end_date'])
            break

      if (not have_preheat) and self.relays_preheat._status:
        self.preheat_off()

      #Find normal events
      index = -1
      next_time = None

      while index < 3:
        index += 1

        if index >= len(self.events):
          break

        if self.events[index]['desired_temp'] == 'Preheat':
          continue
        elif self.events[index]['desired_temp'] == 'On':
          if self.events[index]['start_date'] < current_time and not self.events[index]['end_date'] < current_time:
            forced_on = True
            if not(self.relays_heating._status):
              logger.info('Heating forced on')
              self.heating_on(self.config['heating_settings']['proportional_heating_interval_minutes'])
        else:
          have_temp_event = True
          break

    if have_temp_event:
      next_time =     self.events[index]['start_date']
      next_time_end = self.events[index]['end_date']
      next_temp =     self.events[index]['desired_temp']

      logger.debug('Processing data: ' + str(next_time.astimezone(get_localzone())) + \
        ' to ' + str(next_time_end.astimezone(get_localzone())) + ', ' + str(next_temp))

      self.desired_temp = str(next_temp)

      if next_time_end < current_time:
        #If the last event ended in the past, off.
        logger.warning('Event end time is in the past.')
        self.heating_off(0)

      elif not forced_on:
        temp_diff = next_temp - current_temp
        new_proportional_time = None
        if next_time < current_time:
          time_due_on = next_time
          logger.info('Currently in an event starting at ' + str(next_time.astimezone(get_localzone())) + \
            ' ending at ' + str(next_time_end.astimezone(get_localzone())) + ' temp diff is ' + str(temp_diff))

        #Check all events for warm-up temperature
        for event in self.events:
          if event['desired_temp'] == 'On' or event['desired_temp'] == 'Preheat':
            continue

          event_next_time = event['start_date']
          if event_next_time > current_time:
            event_desired_temp = event['desired_temp']
            event_temp_diff = event_desired_temp - current_temp
            logger.debug('Future event starting at ' + str(event_next_time.astimezone(get_localzone())) + \
              ' temp difference is ' + str(event_temp_diff))
            if event_temp_diff > 0:
              #Start X minutes earlier for each degree the heating is below the desired temp, plus Y minutes.
              event_time_due_on = event_next_time - datetime.timedelta(0,(event_temp_diff * self.config['heating_settings']['minutes_per_degree'] * 60) + (self.config['heating_settings']['effect_delay_minutes'] * 60))
              logger.debug('Future event needs warm up, due on at ' + str(event_time_due_on.astimezone(get_localzone())))
              if time_due_on is None or event_time_due_on < time_due_on or event_time_due_on < current_time:
                time_due_on = event_time_due_on
                next_temp = event_desired_temp
                temp_diff = event_temp_diff
                logger.debug('Future event starting at ' + str(event_next_time.astimezone(get_localzone())) + \
                  ' warm-up, now due on at ' + str(time_due_on.astimezone(get_localzone())))
                #Full blast until 0.3 degrees difference
                if event_temp_diff > 0.3:
                  new_proportional_time = 30
              elif time_due_on is None or event_next_time < time_due_on:
                time_due_on = event_next_time

        if time_due_on < next_time:
          logger.info('Before an event starting at ' + str(next_time.astimezone(get_localzone())) +\
            ' temp diff is ' + str(temp_diff) + ' now due on at ' + str(time_due_on.astimezone(get_localzone())))

        if time_due_on <= current_time:
          if temp_diff < 0:
            logger.info('Current temperature ' + str(current_temp) + ' is higher than the desired temperature ' + str(next_temp))
            self.heating_off(0)
          else:
            if new_proportional_time is None:
              #Calculate the proportional amount of time the heating needs to be on to reach the desired temperature
              new_proportional_time = temp_diff * self.config['heating_settings']['proportional_heating_interval_minutes'] / 2

            if new_proportional_time < self.config['heating_settings']['minimum_active_period_minutes']: #Minimum time boiler can be on to be worthwhile
              new_proportional_time = self.config['heating_settings']['minimum_active_period_minutes']
            elif new_proportional_time > self.config['heating_settings']['proportional_heating_interval_minutes']:
              new_proportional_time = self.config['heating_settings']['proportional_heating_interval_minutes']
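            #Worked example (assumed values): with a 30-minute proportional interval
            #and a 1.0 degC deficit, new_proportional_time = 1.0 * 30 / 2 = 15 minutes
            #on per cycle, then clamped between the minimum active period and the
            #full interval by the checks above.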

            #Are we currently on or off?
            if not(self.relays_heating._status) or self.time_on is None: #Off
              if self.time_off is None:
                time_due_on = next_time
                new_time_due_on = next_time
              elif new_proportional_time <= self.proportional_time:
                #Need to be on for less time - turn on in a bit
                time_due_on = self.time_off + datetime.timedelta(0,(self.config['heating_settings']['proportional_heating_interval_minutes'] * 60) - (self.proportional_time * 60))
                new_time_due_on = self.time_off + datetime.timedelta(0,(self.config['heating_settings']['proportional_heating_interval_minutes'] * 60) - (new_proportional_time * 60))
              else:
                #Need to be on for more time - turn on now
                time_due_on = self.time_off + datetime.timedelta(0,(self.config['heating_settings']['proportional_heating_interval_minutes'] * 60) - (self.proportional_time * 60))
                new_time_due_on = current_time

              if new_time_due_on <= current_time:
                logger.info('Heating is off, due on at ' + str(new_time_due_on.astimezone(get_localzone())) +'; Turning on')
                self.heating_on(new_proportional_time)
              else:
                if new_proportional_time != self.proportional_time:
                  logger.info('Changing time next due on.')
                  self.set_heating_trigger(new_proportional_time, self.relays_heating._status)
                if time_due_on != new_time_due_on:
                  logger.info('Heating was off, due on at ' + str(time_due_on.astimezone(get_localzone())) +\
                                 '. Now due on at ' + str(new_time_due_on.astimezone(get_localzone())))
            else: #On
              time_due_off = self.time_on + datetime.timedelta(0,self.proportional_time * 60)
              if new_proportional_time < self.config['heating_settings']['proportional_heating_interval_minutes']:
                #Must have a time_on at this point
                new_time_due_off = self.time_on + datetime.timedelta(0,new_proportional_time * 60)
              else:
                new_time_due_off = next_time_end

              if new_time_due_off < current_time:
                logger.info('Heating was on, due off at ' + str(time_due_off.astimezone(get_localzone())) +'; Turning off')
                self.heating_off(new_proportional_time)
              else:
                if new_proportional_time != self.proportional_time:
                  logger.info('Changing time next due off.')
                  self.set_heating_trigger(new_proportional_time, self.relays_heating._status)
                if new_time_due_off != time_due_off:
                  logger.info('Heating was on, due off at ' + str(time_due_off.astimezone(get_localzone())) +\
                               '. Now due off at ' + str(new_time_due_off.astimezone(get_localzone())))

    else:
      self.desired_temp = str(self.config['heating_settings']['minimum_temperature'])
      #If we don't have an event yet, warn and ensure relay is off
      logger.info('No events available')
      if self.relays_heating._status:
        logger.debug('Heating off')
        self.heating_off(0)
      if have_preheat:
        logger.info('Preheat but no normal event available.')
      else:
        if self.relays_preheat._status:
          logger.debug('Preheat off')
          self.preheat_off()

    self.check_relay_states()
    self.processing_lock.release()

  def get_credentials(self):
    '''Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    '''
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    logger.debug('Getting credentials from ' + credential_dir)
    if not os.path.exists(credential_dir):
      os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'calendar-heating.json')

    store = oauth2client.file.Storage(credential_path)
    credentials = store.get()
    parser = argparse.ArgumentParser(parents=[tools.argparser])
    flags = parser.parse_args()
    if not credentials or credentials.invalid:
      flow = client.flow_from_clientsecrets('client_secret.json', 'https://www.googleapis.com/auth/calendar.readonly')
      flow.user_agent = 'Heating'
      credentials = tools.run_flow(flow, store, flags)
      logger.info('Storing credentials to ' + credential_path)
    return credentials

  def get_darksky_details(self):
    with open('darksky_details.json') as json_data:
      details = json.load(json_data)
      json_data.close()
    logger.debug('DarkSky details: ' + str(details))
    return details
Example #7
class Scheduler(object, metaclass=Singleton):
    '''
    Description: Port8 scheduler process; this class is the gateway for interacting with the Port8 scheduler
    Steps:
        1. Instantiate this class
        2. Call startScheduler to start the scheduler (syncing scheduler data from the db is invoked internally to ensure the jobs stored in the repository are in sync
            with scheduler job store)
        3. call stopScheduler to stop the scheduler
        4. call addIntervalJob/addCronjob to schedule the job
    '''
    def __init__(self, argSchedulerType='Blocking', argSchedulerMode='Run'):

        try:
            # initializing
            self.Global = Global()
            self.Utility = Utility()
            self.InfraUtil = InfraUtility()
            self.db = DBMySql('Scheduler')
            self.Validation = Validation()
            self.ShcedUtil = SchedUtility()

            self.myModulePyFile = os.path.abspath(__file__)
            self.myClass = self.__class__.__name__
            self.mySchedulerType = argSchedulerType
            self.mySchedulerMode = argSchedulerMode

            #validating arguments
            self.Validation.validateSchedulerInitArg(self.mySchedulerType,
                                                     self.mySchedulerMode,
                                                     InvalidArguments)

            #Setting the infrastructure
            self.Infra = self.InfraUtil.setInfra(self.Global.SchedulerInfraKey)

            if not self.Infra:
                raise InfraInitializationError(
                    'Could not initialize {cls}'.format(
                        cls=(self.myModulePyFile, self.myClass)))

            #self.logger = self.Infra.SchedLogger
            self.logger = self.Infra.getInfraLogger(
                self.Global.SchedulerInfraKey)

            #self.__loadNStartcheduler__()

        except Exception as err:
            print(sys.exc_info()[1:], traceback.format_exc(limit=5))
            sys.exit(-1)

    def startScheduler(self):
        try:
            self.scheduleSchema = self.Utility.getACopy(
                self.Infra.scheduleSchema)
            self.intervalSchema = self.Utility.getACopy(
                self.Infra.intervalSchema)
            self.cronSchema = self.Utility.getACopy(self.Infra.cronSchema)
            self.processJobSchema = self.Utility.getACopy(
                self.Infra.processJobSchema)

            if self.mySchedulerMode == 'Run':
                argPaused = False
            else:
                argPaused = True
            #fi

            mySchedulerConfig = self.Utility.getACopy(
                self.Infra.schedulerConfigData)

            if self.mySchedulerType == 'Background':
                self.Scheduler = BackgroundScheduler(mySchedulerConfig)
            else:
                self.Scheduler = BlockingScheduler(mySchedulerConfig)

            self.Scheduler.start(paused=argPaused)

            # adding listener for scheduler/job
            self.Scheduler.add_listener(self.schedulerListener,
                                        events.EVENT_ALL)

        except Exception as err:
            raise err

    def stopScheduler(self):

        if self.Scheduler.running:
            self.logger.info('stopping scheduler')
            self.Scheduler.shutdown()
        else:
            self.logger.warning('scheduler is not running')

    def processJob(**keyWordArgs):
        '''
            1. Validating the argument using schema
            2. Update job "Executing"
            3. Execute Job
            4. Update Job Status "Completed"
        '''
        from com.port8.core.globals import Global
        from com.port8.core.utils import Utility
        from com.port8.core.infrautils import InfraUtility
        from com.port8.core.validation import Validation
        from com.port8.core.schedutility import SchedUtility

        import os, json, sys, time, inspect

        try:
            Global = Global()
            InfraUtil = InfraUtility()
            Utility = Utility()
            Validation = Validation()
            ShcedUtil = SchedUtility()

            Infra = ShcedUtil.InfraUtil.setInfra('Scheduler')

            logger = Infra.getInfraLogger('Scheduler')
            myProcessJobSchema = Infra.jsonSchema['Main']['process_job_schema']
            logger.debug(
                'Job Schema will be used for validation >> {schema}'.format(
                    schema=myProcessJobSchema))

            myKeyWordArgs = Utility.getACopy(keyWordArgs)
            myCurrentTime = time.time()
            myResponse = Utility.getResponseTemplate()
            logger.debug(
                'argument received >> {args}'.format(args=myKeyWordArgs))

            #validating argument (using json schema validator)
            Validation.validateArguments(myKeyWordArgs, myProcessJobSchema)
            logger.debug('arguments validated >> {args}'.format(args=myKeyWordArgs))

            # building data for this run & persisiting data
            myJobCriteria = ' JobId = ' + repr(myKeyWordArgs['jobid'])

            myStartEventResult = ShcedUtil.processJobStartEvent(
                myKeyWordArgs['jobid'])

            #executing job
            logger.info('Executing .... : {arg}'.format(arg=myKeyWordArgs))

            myResult = InfraUtil.callFunc(**myKeyWordArgs['func_call'])
            myResult['Data'].update(
                {'ElapsedSecs': round(time.time() - myCurrentTime, 5)})

            logger.info('jobid {jobid} execution results: {result}'.format(
                jobid=myKeyWordArgs['jobid'], result=myResult))

            myFinishEventResult = ShcedUtil.processJobFinishEvent(
                myKeyWordArgs['jobid'], myResult)

            if myResult['Status'] == Global.Success:
                # calling job completed event
                if myFinishEventResult['Status'] == Global.Success:
                    Utility.buildResponse(myResponse, Global.Success,
                                          Global.Success,
                                          {'result': myFinishEventResult})
                else:
                    raise processJobError(
                        'Error returned from calling job finish event {error}'.
                        format(error=myFinishEventResult['Message']))
            else:
                raise processJobError(
                    'Error returned from job processing {error}'.format(
                        error=myResult['Message']))

            #building response

            return myResponse

        except Exception as err:
            # log the error and re-raise it so the failure is reported back to the scheduler

            myErrorMsg, myTraceback = Utility.getErrorTraceback()
            logger.error(Global.DefPrefix4Error * Global.DefPrefixCount,
                         myErrorMsg)
            logger.error(Global.DefPrefix4Error * Global.DefPrefixCount,
                         myTraceback)
            Utility.buildResponse(myResponse, Global.UnSuccess, myErrorMsg)

            if 'myCurrentTime' in locals():
                myElapsedSecs = round(time.time() - myCurrentTime, 5)
            else:
                # myCurrentTime was never set, so the elapsed time cannot be computed
                myElapsedSecs = None

            myResponse['Data'].update({'ElapsedSecs': myElapsedSecs})
            myDbResult = ShcedUtil.processJobFinishEvent(
                myKeyWordArgs['jobid'], myResponse)

            raise err

    def scheduleIntervalJob(self):
        pass

    def buildCronTrigger(self, scheduleArg):
        try:
            return CronTrigger(**scheduleArg)
        except Exception as err:
            myErrorMessage = sys.exc_info()[1:]
            self.logger.error(
                'Error {error} in building CronTrigger using {data} '.format(
                    error=myErrorMessage, data=scheduleArg))
            raise err

    def buildCoalesce(self, coalesceArg):
        if coalesceArg is None:
            return True
        else:
            return coalesceArg
        #fi

    def scheduleJob(self, **keyWordArgs):
        '''
        This is for cron based scheduler
        '''
        try:
            #initializing
            myKeyWordArgs = self.Utility.getACopy(keyWordArgs)
            myResponse = self.Utility.getResponseTemplate()
            myProcessKeyWordArg = {}
            myData = ''
            myJobId = self.ShcedUtil.getNewJob(self.Global.JobIdPrefixValue)

            #validating argument (using json schema validator)
            self.Validation.validateArguments(myKeyWordArgs,
                                              self.scheduleSchema)
            self.logger.debug(
                'arguments {args} validated'.format(args=myKeyWordArgs))

            # building job arguments
            myCoalesce = self.buildCoalesce(myKeyWordArgs['coalesce'])

            myProcessJobKeyArg = {
                'func_call': {
                    'module': myKeyWordArgs['func_call']['module'],
                    'cls': myKeyWordArgs['func_call']['cls'],
                    'clsArg': myKeyWordArgs['func_call']['clsArg'],
                    'method': myKeyWordArgs['func_call']['method'],
                    'methodArgType':
                    myKeyWordArgs['func_call']['methodArgType'],
                    'arguments': myKeyWordArgs['func_call']['arguments']
                }
            }

            myProcessJobKeyArg.update({'jobid': myJobId})
            myCronTrigger = self.buildCronTrigger(myKeyWordArgs['schedule'])

            myJob = self.Scheduler.add_job(self.processJob,
                                           trigger=myCronTrigger,
                                           replace_existing=True,
                                           id=myJobId,
                                           jobstore='default',
                                           coalesce=myCoalesce,
                                           kwargs=myProcessJobKeyArg)

            myJobDetails = {
                'id': str(myJob.id),
                'name': str(myJob.name),
                'kwargs': myJob.kwargs,
                'trigger': str(myJob.trigger.fields),
                'func_ref': str(myJob.func_ref)
            }

            myCurrentTime = self.Utility.getCurrentTime()
            myResult = self.db.processDbRequest(operation = 'create', container = 'ScheduledJobs',\
                 dataDict = {
                    'JobId':myJob.id, 'JobName':myJob.name, 'JobTrigger': str(myJob.trigger), 'NextRun' : myJob.next_run_time,
                    'JobFunction' : myJob.func_ref, 'SubmittedBy':'test', 'SubmitDate':self.Utility.getCurrentTime(), 'Status':'Submitted',
                    'JobDetails':json.dumps(myJobDetails)}, commitWork = True)

            print('job persist result', myResult)

            if self.Utility.extractStatusFromResult(
                    myResult) == self.Global.Success:
                self.Utility.buildResponse(myResponse, self.Global.Success,
                                           self.Global.Success,
                                           {'Job': myJobDetails})
            else:
                self.Utility.buildResponse(
                    myResponse, self.Global.UnSuccess,
                    'Job has been submitted but could not persist data in database',
                    {'Job': myJobDetails})
            #fi

        except Exception as err:
            myMessage = sys.exc_info()[1:], traceback.format_exc(limit=1)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess,
                                       myMessage)
            raise err

    def schedulerListener(self, eventArg):
        '''
        Description:
            This is an internal method that handles all scheduler events received via the listener and persists the event details in the db.
            It is called internally.
        '''
        if eventArg.code == events.EVENT_SCHEDULER_STARTED:
            #self.logger.info('EVENT: Scheduler statrted, event code {code}'.format(code=eventArg.code))
            self.db.processDbRequest(
                operation='create',
                container='SchedulerEventLog',
                dataDict={'EventName': 'SCHEDULER_STARTED'},
                commitWork=True)
        elif eventArg.code == events.EVENT_SCHEDULER_SHUTDOWN:
            #self.logger.info('EVENT: Scheduler shutdown, event code {code}'.format(code=eventArg.code))
            self.db.processDbRequest(
                operation='create',
                container='SchedulerEventLog',
                dataDict={'EventName': 'SCHEDULER_SHUTDOWN'},
                commitWork=True)
        elif eventArg.code == events.EVENT_SCHEDULER_PAUSED:
            #self.logger.info('EVENT: Scheduler paused, event code {code}'.format(code=eventArg.code))
            self.db.persistData(operation='create',
                                container='SchedulerEventLog',
                                dataDict={'EventName': 'SCHEDULER_PAUSED'},
                                commitWork=True)
        elif eventArg.code == events.EVENT_SCHEDULER_RESUMED:
            #self.logger.info('EVENT: Scheduler resumed, event code {code}'.format(code=eventArg.code))
            self.db.processDbRequest(
                operation='create',
                container='SchedulerEventLog',
                dataDict={'EventName': 'SCHEDULER_RESUMED'},
                commitWork=True)
        elif eventArg.code == events.EVENT_EXECUTOR_ADDED:
            self.logger.info('EVENT: Executor added, event code {code}'.format(
                code=eventArg.code))
            self.db.processDbRequest(operation='create',
                                     container='SchedulerEventLog',
                                     dataDict={'EventName': 'EXECUTOR_ADDED'},
                                     commitWork=True)
        elif eventArg.code == events.EVENT_EXECUTOR_REMOVED:
            self.logger.info(
                'EVENT: Executor removed, event code {code}'.format(
                    code=eventArg.code))
            self.db.processDbRequest(
                operation='create',
                container='SchedulerEventLog',
                dataDict={'EventName': 'EXECUTOR_REMOVED'},
                commitWork=True)
        elif eventArg.code == events.EVENT_JOBSTORE_ADDED:
            self.logger.info('EVENT: JobStore Added, event code {code}'.format(
                code=eventArg.code))
            self.db.processDbRequest(operation='create',
                                     container='SchedulerEventLog',
                                     dataDict={'EventName': 'JOBSTORE_ADDED'},
                                     commitWork=True)
        elif eventArg.code == events.EVENT_JOBSTORE_REMOVED:
            self.logger.info(
                'EVENT: Jobstore removed, event code {code}'.format(
                    code=eventArg.code))
            self.db.processDbRequest(
                operation='create',
                container='SchedulerEventLog',
                dataDict={'EventName': 'JOBSTORE_REMOVED'},
                commitWork=True)
        elif eventArg.code == events.EVENT_ALL_JOBS_REMOVED:
            self.logger.info(
                'EVENT: All jobs removed, event code {code}'.format(
                    code=eventArg.code))
            self.db.processDbRequest(
                operation='create',
                container='SchedulerEventLog',
                dataDict={'EventName': 'ALL_JOBS_REMOVED'},
                commitWork=True)
        elif eventArg.code == events.EVENT_JOB_ADDED:
            #myJobId = eventArg.job_id
            #self.logger.info('EVENT: Job added, job detail >>> {job}, {jobstore}'.format(job=eventArg.job_id, jobstore = eventArg.jobstore))
            self.db.processDbRequest(operation = 'create', container = 'SchedulerEventLog', \
                dataDict = {'EventName':'JOB_ADDED', 'EventDetails':json.dumps({'JobId':eventArg.job_id}) }, commitWork = True)
        elif eventArg.code == events.EVENT_JOB_REMOVED:
            self.db.processDbRequest(operation = 'create', container = 'SchedulerEventLog', \
                dataDict = {'EventName':'JOB_REMOVED', 'EventDetails':json.dumps({'JobId':eventArg.job_id}) }, commitWork = True)
        elif eventArg.code == events.EVENT_JOB_MODIFIED:
            #self.logger.info('EVENT: Job modified, job detail >>> {job}, {jobstore}'.format(job=eventArg.job_id, jobstore = eventArg.jobstore))
            self.db.processDbRequest(operation = 'create', container = 'SchedulerEventLog', \
                dataDict = {'EventName':'JOB_MODIFIED', 'EventDetails':json.dumps({'JobId':eventArg.job_id}) }, commitWork = True)
        elif eventArg.code == events.EVENT_JOB_EXECUTED:
            self.logger.info('EVENT: Job executed {event_code}, {job_id}, {job_store}, {sched_run_time}, {job_retval}, {error}, {traceback}'.\
                format(event_code=eventArg.code, job_id = eventArg.job_id, job_store = eventArg.jobstore, sched_run_time = eventArg.scheduled_run_time,\
                       job_retval = eventArg.retval, traceback = eventArg.traceback))
            self.db.processDbRequest(operation = 'create', container = 'SchedulerEventLog', \
                dataDict = {'EventName':'JOB EXECUTED', 'EventDetails': json.dumps({'job_id':eventArg.job_id,'sched_run_time':str(eventArg.scheduled_run_time),\
                            'job_retval': str(eventArg.retval),'traceback' : str(eventArg.traceback) })}, commitWork=True)
        elif eventArg.code == events.EVENT_JOB_ERROR:
            self.db.processDbRequest(operation = 'create', container = 'SchedulerEventLog', \
                dataDict = {'EventName':'JOB ERROR', 'EventDetails': json.dumps({'job_id':eventArg.job_id,'sched_run_time':str(eventArg.scheduled_run_time),\
                            'job_retval': str(eventArg.retval),'traceback' : str(eventArg.traceback)})}, commitWork=True)
        elif eventArg.code == events.EVENT_JOB_MISSED:
            self.db.processDbRequest(operation = 'create', container = 'SchedulerEventLog',\
                dataDict = {'EventName':'JOB MISSED', 'EventDetails': json.dumps({'job_id':eventArg.job_id})}, commitWork=True)
        elif eventArg.code == events.EVENT_JOB_SUBMITTED:
            self.db.processDbRequest(operation = 'create', container = 'SchedulerEventLog',\
                dataDict = {'EventName':'JOB_SUBMITTED', 'EventDetails': json.dumps({'job_id':eventArg.job_id})})
        elif eventArg.code == events.EVENT_JOB_MAX_INSTANCES:
            #self.logger.info('EVENT: Job maxinstance, job detail >>> {job}, {jobstore}'.format(job=eventArg.job_id, jobstore = eventArg.jobstore))
            self.db.processDbRequest(operation = 'create', container = 'SchedulerEventLog',\
                dataDict = {'EventName':'JOB_MAX_INSTANCES', 'EventDetails': json.dumps({'job_id':eventArg.job_id})})
        #fi

    def syncJobFromRepository():
        pass
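
The class docstring above lists the intended call sequence. A usage sketch along those lines (the argument shapes for scheduleJob are inferred from the schema handling in the code above, so treat them as assumptions):

scheduler = Scheduler(argSchedulerType='Background', argSchedulerMode='Run')
scheduler.startScheduler()
scheduler.scheduleJob(schedule={'minute': '*/5'},
                      coalesce=True,
                      func_call={'module': 'mymodule', 'cls': 'MyClass', 'clsArg': {},
                                 'method': 'run', 'methodArgType': 'kwargs', 'arguments': {}})
# ...
scheduler.stopScheduler()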
Example #8
class JobScheduler(server.Server):
    def __init__(self):
        super(JobScheduler, self).__init__()
        self.mongo_client = None
        self.scheduler = None
        self.setup_mongo()
        self.setup_scheduler()

    def add_job(self,
                func,
                trigger=None,
                args=None,
                kwargs=None,
                id=None,
                name=None,
                **trigger_args):
        """
        'trigger': refer to apscheduler.triggers
        """
        self.scheduler.add_job(func,
                               trigger=trigger,
                               args=args,
                               kwargs=kwargs,
                               id=id,
                               name=name,
                               jobstore='mongo',
                               executor='default',
                               replace_existing=True,
                               **trigger_args)
        logging.info(
            "job added. func: {}, trigger: {}, args: {}, kwargs: {}, id: {}, name: {}, trigger_args: {}"
            .format(func.__name__, trigger, args, kwargs, id, name,
                    trigger_args))
        return self

    def run(self):
        self.scheduler.start()

    def stop(self):
        self.scheduler.remove_all_jobs()
        self.scheduler.shutdown()
        logging.info("job scheduler is stopped.")

    def setup_mongo(self):
        mc = cm.config[
            'mongo_scheduler']  # a config entry named 'mongo_scheduler' is required; otherwise override this method
        m = mongo.Mongo(**mc)
        self.mongo_client = m.get_conn()
        logging.info("mongo client setup done.")

    def setup_scheduler(self):
        mc = cm.config['mongo_scheduler']
        jobstores = {
            'default':
            SQLAlchemyJobStore(url='sqlite:///jobs.sqlite'),
            'mongo':
            MongoDBJobStore(client=self.mongo_client,
                            database=mc['db'],
                            collection=mc['collection'])
        }
        executors = {
            'default': ThreadPoolExecutor(10),
            'processpool': ProcessPoolExecutor(5)
        }
        job_defaults = {'coalesce': False, 'max_instances': 3}
        self.scheduler = BlockingScheduler(jobstores=jobstores,
                                           executors=executors,
                                           job_defaults=job_defaults,
                                           timezone=tzlocal.get_localzone())
        self.scheduler.add_listener(self.__listener)
        logging.info("scheduler setup done.")

    @staticmethod
    def __listener(event):
        logging.info('received scheduler event: %s', event)
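
As the add_job docstring notes, the trigger name and trigger_args follow apscheduler.triggers. A usage sketch of the wrapper above (the job function and its schedule are illustrative assumptions):

import logging

def heartbeat():
    logging.info('heartbeat')

js = JobScheduler()
js.add_job(heartbeat, trigger='interval', id='heartbeat', name='heartbeat', minutes=5)
js.run()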
Example #9
        if _COUNTS['exception_count'] >= 5:
            sched.shutdown()
        _COUNTS['exception_count'] += 1


@retry(wait=wait_random_exponential(multiplier=1, max=60),
       stop=stop_after_attempt(2))
def post_data(sensordata):
    ssm = boto3.client('ssm')
    arn = ssm.get_parameter(
        Name='/Roomdata/RoomDataTopicArn')["Parameter"]["Value"]
    sns = boto3.client('sns')
    publish_data = sensordata.get_data()  # use the sensor object passed in as a job argument
    publish_data.update({'locationId': LOCATION})
    logging.info(json.dumps(publish_data))
    response = sns.publish(TopicArn=arn, Message=json.dumps(publish_data))
    _COUNTS['exception_count'] = 0


if __name__ == '__main__':
    try:
        data = sensor_data()
        sched = BlockingScheduler()
        logging.info('Starting logging')
        sched.add_job(post_data, 'cron', minute='*/1', args=[data])
        sched.add_listener(my_listener, EVENT_JOB_ERROR)
        sched.start()

    except Exception as e:
        logging.exception('Error publishing data')
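
Only the tail of `my_listener` survives at the top of this example. A reconstruction consistent with that tail might look like the following sketch (the first two lines are assumptions):

def my_listener(event):
    if event.exception:
        logging.exception('Scheduled job failed')
        if _COUNTS['exception_count'] >= 5:
            sched.shutdown()
        _COUNTS['exception_count'] += 1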
Example #10
class JobScheduler(object):
    def __init__(self, every=30, unit='second'):
        self.mongo = mongopool.get()
        self.cursor = self.mongo.get_database('apscheduler').get_collection(
            'jobs')
        self.every = every
        self.unit = unit
        self.scheduler = BlockingScheduler(logger=logger)
        self.scheduler.configure(jobstores=jobstores,
                                 executors=executors,
                                 job_defaults=job_defaults,
                                 timezone=pytz.timezone('Asia/Saigon'))
        self._set_trigger(every, unit)

    def _set_trigger(self, every, unit):
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        if unit == 'second':
            self.trigger = CronTrigger(second='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'minute':
            self.trigger = CronTrigger(minute='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'hour':
            self.trigger = CronTrigger(hour='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'day':
            self.trigger = CronTrigger(day='*/{}'.format(every),
                                       start_date=now)
        else:
            raise ValueError('Unknown time unit')

    def add_jobstore(self, jobstore, alias):
        self.scheduler.add_jobstore(jobstore, alias)

    def add_executor(self, executor, alias):
        self.scheduler.add_executor(executor, alias)

    def add_job(self,
                job_fn,
                id='id1',
                name='job1',
                jobstore='default',
                executor='default',
                args=None,
                kwargs=None):
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        history = list(self.cursor.find({'_id': id}))
        if history:
            #TODO: process missing jobs
            self.cursor.delete_one({'_id': id})
        next_run_time = self.trigger.get_next_fire_time(None, now)
        if kwargs:
            kwargs['run_time'] = next_run_time
        else:
            kwargs = {'run_time': next_run_time}

        self.scheduler.add_job(job_fn,
                               trigger=self.trigger,
                               next_run_time=next_run_time,
                               id=id,
                               name=name,
                               jobstore=jobstore,
                               executor=executor,
                               args=args,
                               kwargs=kwargs)

    def remove_job(self, id, jobstore='default'):
        self.scheduler.remove_job(job_id=id, jobstore=jobstore)

    def callback(self, callback_fn, mark=EVENT_ALL):
        self.scheduler.add_listener(callback_fn, mark)

    def start(self):
        mongopool.put(self.mongo)
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown()
#           Scheduled tasks
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
sched = BlockingScheduler()


# Listener
def listener(event):
    if event.exception:
        log.getlogger().info("【{}任务退出】{}".format(event.job_id,
                                                 event.exception.message))
    else:
        log.getlogger().info("【爬取任务正常运行】")


sched.add_listener(listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

# ================================================================
#           Data structures
global params
global original_data
original_data = OrderedDict()
# {
#     "0,0" : { #图幅名
#         "2019-03-31 17-49-00" : { #时间
#             "time" : "",          #时间
#             "timestamp" : "",     #时间戳
#             "url" : "",           #下载链接
#             "extent" : "",        #范围
#             "req_pnt" : "",       #该瓦片请求的点坐标
#             "file_path" : ""      #文件的path
})
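
A hedged illustration (not in the original) of how one entry of original_data sketched above might be filled in; all values are placeholders.

original_data["0,0"] = OrderedDict()
original_data["0,0"]["2019-03-31 17-49-00"] = {
    "time": "2019-03-31 17-49-00",
    "timestamp": "1554025740",
    "url": "http://example.com/tiles/0_0.png",
    "extent": "",
    "req_pnt": "",
    "file_path": "",
}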

mail_sender = MailSender()


def _notify_scheduler(event):
    if event.code == EVENT_SCHEDULER_START:
        content = 'scheduler start now %s' % get_now()
    elif event.code == EVENT_SCHEDULER_SHUTDOWN:
        content = 'scheduler shutdown now %s' % get_now()
    else:
        content = 'unknown'
    mail_sender.send_email(config.data['system_admin_email_list'],
                           'scheduler notify', content)

scheduler.add_listener(_notify_scheduler,
                       EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN)


def notify_error(event):
    if event.code == EVENT_JOB_ERROR:
        content = 'job: %s failed - %s - exception: %s - traceback: %s' \
                  % (event.job_id, event.scheduled_run_time,
                     event.exception, event.traceback)
    elif event.code == EVENT_JOB_MISSED:
        content = 'job: %s missed - %s ' % (event.job_id,
                                            event.scheduled_run_time)
    else:
        content = 'unknown'
    mail_sender.send_email(config.data['system_admin_email_list'],
                           'job error notify', content)
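
The registration of notify_error is not visible in this excerpt; presumably it mirrors the scheduler-event listener above, something like the following (an assumption, not confirmed by the source):

# Assumed registration, mirroring _notify_scheduler above
scheduler.add_listener(notify_error, EVENT_JOB_ERROR | EVENT_JOB_MISSED)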
Exemple #13
0
class JobManage():
    def __init__(self):
        jobstores = {'default': MemoryJobStore()}
        executors = {
            'default': ThreadPoolExecutor(50)
            #             'processpool': ProcessPoolExecutor(3)
        }
        job_defaults = {'coalesce': False, 'max_instances': 50}
        self.sched = BlockingScheduler(jobstores=jobstores,
                                       executors=executors,
                                       job_defaults=job_defaults)
        self.addError()
        self.addJobExecuted()

    def addJob(self, func, jobId=None, cron=None, args=None, kwargs=None):
        '''Only the cron form is supported (see the usage sketch after this class):

            *  *  *  *  *  command
            minute hour day month weekday command

            Column 1: minute 0~59 (* or */1 means every minute)
            Column 2: hour 0~23 (0 means midnight)
            Column 3: day of month 1~31
            Column 4: month 1~12
            Column 5: day of week 0~6 (0 means Sunday)
            Column 6: the command to run
        '''
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        if cron is None:
            raise Exception("cron cannot be Null")

        (minute, hour, day, month, week) = cron.split(" ")
        self.sched.add_job(func,
                           trigger='cron',
                           id=jobId,
                           hour=hour,
                           minute=minute,
                           day=day,
                           month=month,
                           day_of_week=week,
                           args=args,
                           kwargs=kwargs)

    def removeJob(self, jobId):
        self.sched.remove_job(jobId)

    def start(self):
        self.sched.start()

    def shutdown(self):
        self.sched.shutdown()

    def printJobs(self):
        self.sched.print_jobs()

    def getJobs(self):
        return self.sched.get_jobs()

    def addError(self, func=None):
        if func is None:
            func = self.listener
        self.sched.add_listener(func, EVENT_JOB_ERROR)

    def addJobExecuted(self, func=None):
        if func is None:
            func = self.listener
        self.sched.add_listener(func, EVENT_JOB_EXECUTED)

    def listener(self, event):
        if event.exception:
            log.error("Job [%s] failed: %s" % (event.job_id, event.traceback))
        else:
            log.debug("Job [%s] finished at %s" % (event.job_id, getNow()))


# jobManage = JobManage()
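
A hedged usage sketch (not part of the original snippet); the job function, id and cron string are placeholders.

def nightly_report():
    print('running nightly report')


jobManage = JobManage()
jobManage.addJob(nightly_report, jobId='nightly_report', cron='30 2 * * *')  # 02:30 every day
jobManage.printJobs()
jobManage.start()  # blocks, since a BlockingScheduler is used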
Exemple #14
0
            return
        log.logger.info('update zops_agent done')

    def backup_agent(self):
        app_dir = os.path.join(prog_, '..')
        src = "%s/agent" % app_dir
        dst = "%s/agent.%s" % (app_dir, int(time.time()))
        backup_list = glob.glob('%s/agent.[0-9]*' % app_dir)
        max_backup = int(self.opts['max_backup'])
        if len(backup_list) >= max_backup:
            for bak in sorted(backup_list, key=lambda x: -int(os.path.basename(x).split('.')[1]))[max_backup - 1:]:
                shutil.rmtree(bak)
        shutil.copytree(src, dst)

if __name__ == '__main__':
    opts = read_config(config_file='conf/client.conf')
    wd = WatchDog(opts)
    #apscheduler.events
    def err_listener(ev):
        if ev.exception:
            aps_log.info('%s error.', str(ev.job_id))
        else:
            aps_log.info('%s miss', str(ev.job_id))
    from apscheduler.schedulers.blocking import BlockingScheduler
    from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_MISSED
    from apscheduler.triggers.interval import IntervalTrigger
    sched = BlockingScheduler()
    sched.add_listener(err_listener, EVENT_JOB_ERROR | EVENT_JOB_MISSED)
    sched.add_job(wd.watch_process, IntervalTrigger(seconds=10))
    sched.add_job(wd.watch_update, IntervalTrigger(seconds=10))
    sched.start()
        log.info("A job has been added...")
    elif event.code & EVENT_JOB_MISSED != 0:
        log.info("Some jobs were missed...")


if __name__ == '__main__':
    # Initialize logging
    logconf.load_my_logging_cfg()
    basedir = os.path.abspath(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
    log.info('Project data directory: {}'.format(basedir))
    log.info('Initializing the local persistent data directory...')
    initDataPath(basedir)
    log.info('Initializing the local data directory...')
    initdb(basedir)

    scheduler = BlockingScheduler()
    scheduler.add_listener(
        listener,
        EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_JOB_EXECUTED
        | EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_ADDED)

    # Upload data every 10 seconds from 18:00 in the evening until 06:00 the next morning
    # scheduler.add_job(upData, 'cron', hour='0-6,18-23', second='0,10,20,30,40,50')
    scheduler.add_job(upData, 'cron', hour='*', second='0,10,20,30,40,50')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        print('Job finished.')
        scheduler.shutdown()
Exemple #16
0
class Server(object):
    """
    This is the main background Orchestration class. This will run every REFRESH_TIMER
    seconds and perform tasks. Each task is assumed to follow these guidelines:
        * Available in the JOB_LIST variable or is a recording job.
        * Independent from any other function in the list.
        * Takes the scheduler and config as its only arguments (as of now).
        * Raises anything when it fails.
        * Raises nothing for a successful run.

    In case a task fails, it logs appropriately. No mechanism (as yet) has been provided for
    logging errors on the scheduler side; that responsibility lies with the task itself.

    The core of this server is a scheduler (APScheduler). It makes sure that jobs (functions)
    run at the interval specified by REFRESH_TIMER. Event listeners are put in place to react to failures/successes.
    """
    JOB_LIST = [run_job]
    REFRESH_TIMER = 5

    def __init__(self, config):
        self.config = config
        self.db = DBStorage(config)

        self.jobs = self.JOB_LIST

        logger.debug("Starting Orchestration.")

        self.sched = BlockingScheduler(
            jobstores={"default": MemoryJobStore()},
            executors={"default": ProcessPoolExecutor(5)},
            job_defaults={
                "coalesce":
                True,  # Combine multiple waiting instances of the same job into one.
                "max_instances":
                1,  # Total number of concurrent instances for the same job. Change to 1 for upload.
            },
        )

    def add_regular_jobs(self):
        for item in self.jobs:
            j = self.sched.add_job(
                item,
                args=[self.sched, self.config],
                trigger="interval",
                seconds=self.REFRESH_TIMER,
            )
            logger.critical("Added job {}: {}".format(j.id, j.func))

    def add_recording_jobs(self):
        job_list = self.db.get_job()

        for job in job_list:
            cron = job.start.split()

            j = self.sched.add_job(
                record_video,
                args=[job, self.config],
                trigger="cron",
                minute=cron[0],
                hour=cron[1],
                day=cron[2],
                month=cron[3],
                day_of_week=cron[4],
                year=cron[5],
            )
            logger.critical("Added job {}: {}".format(j.id, j.func))

    @staticmethod
    def endjob_listener(event):
        if event.exception:
            logger.critical("Job {}: FAILED".format(event.job_id))
        else:
            logger.critical("Job {}: SUCCEEDED with return value {}".format(
                event.job_id, event.retval))

    def run_server(self):
        self.add_regular_jobs()
        self.add_recording_jobs()

        self.sched.add_listener(self.endjob_listener,
                                EVENT_JOB_ERROR | EVENT_JOB_EXECUTED)

        try:
            self.sched.start()
        except KeyboardInterrupt:
            logger.info("Interrupt received.")
            self.sched.shutdown()
            logger.debug("Orchestration shut down.")
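
A hedged sketch (not from the original) of a task that follows the guidelines in the class docstring; run_job in JOB_LIST presumably has a similar shape. The body is a placeholder.

def example_task(scheduler, config):
    """Hypothetical task: takes the scheduler and config, raises on failure,
    returns nothing on success."""
    if config is None:
        raise ValueError('missing configuration')
    # ... do the actual work here ...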
Exemple #17
0
def example4(current_path, rootpaths):
    print('into example4......')
    # ####Data-generation date setting, user-configurable, precision down to year/month/day
    # set_time = '20200309'
    # ####Root directory setting
    # rootpath = 'C:\\Users\\Administrator\\Desktop\\test\\dataaaaa\\'

    set_times = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    ####Data-generation date setting, user-configurable, precision down to year/month/day
    yyyy = set_times[0:4]
    yyyymm = set_times[0:6]
    yyyymmdd = set_times[0:8]
    yyyymmddhh = set_times[0:10]
    yyyymmddhhmm = set_times[0:12]
    ####Matching expressions used to substitute the timestamps
    repace_yyyymmdd = '_' + yyyymmdd
    repace_yyyymmddhh = '_' + yyyymmddhh
    repace_yyyymmddhhmm = '_' + yyyymmddhhmm

    ####Root directory setting
    rootpath = rootpaths
    ####Product paths, uniformly changed from yyyy to yyyymm

    if ('Windows' == platform.system()):
        ####Generate one day of data for the configured date from the sample data under the src paths; the sample data sits in the current path
        srcpathA = current_path + '\\gen_IONO_CET_ION\\201907\\20190716\\'  ##1小时的样本数据
        srcpathB = current_path + '\\gen_IONO_CET_ISM\\201907\\20190716\\'  ##1小时的样本数据
        srcpathC = current_path + '\\gen_IONO_FDS_ION\\2019\\20191115\\'  ##24小时的样本数据
        srcpathD = current_path + '\\gen_IONO_FDS_ISM\\202001\\20200101\\CDZJ\\CDZJ_ISM01_DBD_L11_01H_20200101000000.txt'  ##24小时的样本数据
        srcpathE = current_path + '\\gen_SOLAR_FDS_SOT\\201910\\20191012\\'
        srcpathF = current_path + '\\gen_SOLAR_CMA_SRT\\201803\\20180328\\SDZM\\YJGC_SDWH_TYSD_20180328_061601_L0_0000_01S.txt'
        srcpathG = current_path + '\\gen_SOLAR_FDS_SRT\\201912\\20191201\\'

        srcpath5 = current_path + '\\Data\\TEST\\FDS\\geomag\\FGM\\2020\\20200306\\'  ##24小时的样本数据
        srcpath6 = current_path + '\\Data\\TEST\\FDS\\atmos\\MET\\2020\\20200306\\'  ##24小时的样本数据
        srcpath7 = current_path + '\\Data\\TEST\\FDS\\atmos\\MST\\2020\\20200306\\'  ##24小时的样本数据
        srcpath8 = ''
        srcpath9 = ''
        srcpath10 = ''

        despathA = rootpath + '\\Data\\TEST\\CET\\iono\\ION\\' + yyyymm + '\\' + yyyymmdd + '\\'  ##1小时的样本数据
        despathB = rootpath + '\\Data\\TEST\\CET\\iono\\ISM\\' + yyyymm + '\\' + yyyymmdd + '\\'  ##1小时的样本数据
        despathC = rootpath + '\\Data\\TEST\\FDS\\iono\\ION\\' + yyyymm + '\\' + yyyymmdd + '\\'  ##24小时的样本数据
        despathD = rootpath + '\\Data\\TEST\\FDS\\iono\\ISM\\' + yyyymm + '\\' + yyyymmdd + '\\'  ##24小时的样本数据
        despathE = rootpath + '\\Data\\TEST\\FDS\\solar\\SOT\\' + yyyymm + '\\' + yyyymmdd + '\\'  ##1小时的样本数据
        despathF = rootpath + '\\Data\\TEST\\CMA\\solar\\SRT\\' + yyyymm + '\\' + yyyymmdd + '\\'  ##1小时的样本数据
        despathG = rootpath + '\\Data\\TEST\\FDS\\solar\\SRT\\' + yyyymm + '\\' + yyyymmdd + '\\'  ##1小时的样本数据

        despath5 = rootpath + '\\Data\\TEST\\FDS\\geomag\\FGM\\' + yyyymm + '\\' + yyyymmdd + '\\'  ##24小时的样本数据
        despath6 = rootpath + '\\Data\\TEST\\FDS\\atmos\\MET\\' + yyyymm + '\\' + yyyymmdd + '\\'  ##24小时的样本数据
        despath7 = rootpath + '\\Data\\TEST\\FDS\\atmos\\MST\\' + yyyymm + '\\' + yyyymmdd + '\\'  ##24小时的样本数据
        despath8 = ''
        despath9 = ''
        despath10 = ''

    if ('Linux' == platform.system()):
        ####Generate one day of data for the configured date from the sample data under the src paths; the sample data sits in the current path
        srcpathA = current_path + '/gen_IONO_CET_ION/201907/20190716/'  ##1小时的样本数据
        srcpathB = current_path + '/gen_IONO_CET_ISM/201907/20190716/'  ##1小时的样本数据
        srcpathC = current_path + '/gen_IONO_FDS_ION/2019/20191115/'  ##24小时的样本数据
        srcpathD = current_path + '/gen_IONO_FDS_ISM/202001/20200101/CDZJ/CDZJ_ISM01_DBD_L11_01H_20200101000000.txt'  ##24小时的样本数据
        srcpathE = current_path + '/gen_SOLAR_FDS_SOT/201910/20191012/'  ##24小时的样本数据
        srcpathF = current_path + '/gen_SOLAR_CMA_SRT/201803/20180328/SDZM/YJGC_SDWH_TYSD_20180328_061601_L0_0000_01S.txt'
        srcpathG = current_path + '/gen_SOLAR_FDS_SRT/201912/20191201/'

        srcpath5 = current_path + '/gen_FDS_GEOMAG_FGM/202003/20200330/'  ##15分钟的样本数据
        srcpath6 = current_path + '/gen_FDS_ATMOS_MET/201201/20120101/'  ##1小时的样本数据
        srcpath7 = current_path + '/gen_FDS_ATMOS_MST/201911/20191105/'  ##30分钟的样本数据
        srcpath71 = current_path + '/gen_FDS_ATMOS_MST/201911/20191106/'  ##30分钟的样本数据
        srcpath8 = current_path + '/gen_FDS_GEOMAG_FGM/202003/20200329/'  ##3小时的样本数据
        srcpath9 = current_path + '/gen_MDP_ATMOS_LID/201910/20191022/'  ##1天的样本数据
        srcpath10 = current_path + '/gen_MDP_GEOMAG_FGM/201910/20191021/'  ##1天的样本数据
        srcpath11 = current_path + '/gen_FDS_ATMOS_CMA_UPAR/202003/20200331/'  ##12小时的样本数据
        srcpath12 = current_path + '/gen_FDS_GEOMAG_FGM/202003/20200331/'  ##24小时的样本数据
        srcpath13 = current_path + '/gen_FDS_ATMOS_AFD_UPAR/202004/20200401/'  ##24小时的样本数据

        # despathA = rootpath + '/kjtq_data/CET/iono/ION/' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据
        # despathB = rootpath + '/kjtq_data/CET/iono/ISM/' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据
        # despathC = rootpath + '/kjtq_data/FDS/iono/ION/' + yyyymm + '/' + yyyymmdd + '/'  ##24小时的样本数据
        # despathD = rootpath + '/kjtq_data/FDS/iono/ISM/' + yyyymm + '/' + yyyymmdd + '/'  ##24小时的样本数据
        # despathE = rootpath + '/kjtq_data/FDS/solar/SOT/' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据
        # despathF = rootpath + '/kjtq_data/CMA/solar/SRT/' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据
        # despathG = rootpath + '/kjtq_data/FDS/solar/SRT/' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据

        despathA = rootpath + '/kjtq_data/CET/iono/ION/XXXM_ION/' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据
        despathB = rootpath + '/kjtq_data/CET/iono/ISM/XXXM_ISM/' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据
        despathC = rootpath + '/kjtq_data/FDS/iono/ION/XXXJ_ION/' + yyyymm + '/' + yyyymmdd + '/'  ##24小时的样本数据
        despathD = rootpath + '/kjtq_data/FDS/iono/ISM/XXXJ_ISM/' + yyyymm + '/' + yyyymmdd + '/'  ##24小时的样本数据
        despathE = rootpath + '/kjtq_data/FDS/solar/SOT/' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据
        despathF = rootpath + '/kjtq_data/CMA/solar/SRT/' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据
        despathG = rootpath + '/kjtq_data/FDS/solar/SRT/XXXJ_SRT' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据

        despath5 = rootpath + '/kjtq_data/FDS/geomag/FGM/XXXJ_FGM/' + yyyymm + '/' + yyyymmdd + '/'  ##15分钟的样本数据
        despath6 = rootpath + '/kjtq_data/FDS/atmos/MET/XXXJ_MET/' + yyyymm + '/' + yyyymmdd + '/'  ##1小时的样本数据
        despath7 = rootpath + '/kjtq_data/FDS/atmos/MST/XXXM_MST/' + yyyymm + '/' + yyyymmdd + '/'  ##30分钟的样本数据
        despath71 = rootpath + '/kjtq_data/FDS/atmos/MST/XXXJ_MST/' + yyyymm + '/' + yyyymmdd + '/'  ##30分钟的样本数据
        despath8 = rootpath + '/kjtq_data/FDS/geomag/FGM/XXXJ_FGM/' + yyyymm + '/' + yyyymmdd + '/'  ##3小时的样本数据
        despath9 = rootpath + '/kjtq_data/MDP/atmos/LID/' + yyyymm + '/' + yyyymmdd + '/'  ##1天的样本数据
        despath10 = rootpath + '/kjtq_data/MDP/geomag/FGM/' + yyyymm + '/' + yyyymmdd + '/'  ##1天的样本数据
        despath11 = rootpath + '/kjtq_data/CMA/atmos/UPC/' + yyyymm + '/' + yyyymmdd + '/'  ##12小时的样本数据
        despath12 = rootpath + '/kjtq_data/FDS/geomag/FGM/XXXJ_FGM/' + yyyymm + '/' + yyyymmdd + '/'  ##24小时的样本数据
        despath13 = rootpath + '/kjtq_data/CMA/atmos/UPA/' + yyyymm + '/' + yyyymmdd + '/'  ##24小时的样本数据

    ####Create the destination directory structure
    ####In practice makedirs already creates the intermediate directories when building the destination sub-directories, so the directory creation below can be skipped
    # if not os.path.exists(despathA):
    # os.makedirs(despathA)
    # if not os.path.exists(despathB):
    # os.makedirs(despathB)
    # if not os.path.exists(despathC):
    # os.makedirs(despathC)
    # if not os.path.exists(despathD):
    # os.makedirs(despathD)
    # if not os.path.exists(despathE):
    # os.makedirs(despathE)
    # if not os.path.exists(despathF):
    # os.makedirs(despathF)
    # if not os.path.exists(despathG):
    # os.makedirs(despathG)

    ####The srcpath values need validity checks, otherwise the rest of the program enters an unpredictable state
    if not os.path.exists(srcpathA):
        exit('do not exist %s' % srcpathA)
    if not os.path.exists(srcpathB):
        exit('do not exist %s' % srcpathB)
    if not os.path.exists(srcpathC):
        exit('do not exist %s' % srcpathC)
    if not os.path.exists(srcpathD):
        exit('do not exist %s' % srcpathD)
    if not os.path.exists(srcpathE):
        exit('do not exist %s' % srcpathE)
    if not os.path.exists(srcpathF):
        exit('do not exist %s' % srcpathF)
    if not os.path.exists(srcpathG):
        exit('do not exist %s' % srcpathG)

    #### Start the scheduled jobs
    scheduler = BlockingScheduler()  # blocking mode
    #scheduler = BackgroundScheduler()  # non-blocking mode

    #### Users add their own add_job calls below
    #### despath needs to be re-derived from each day's date (see the sketch after this function), otherwise all data ends up in the folder of the day the jobs were started

    jobA = scheduler.add_job(
        func=gen_IONO_CET_ION_scheduler,
        args=[srcpathA, despathA],
        trigger='cron',
        hour='8-18',
        id='gen_IONO_CET_ION_scheduler')  # runs every day during hours 08-18
    #jobB = scheduler.add_job(func=gen_IONO_CET_ISM_scheduler, args=[srcpathB, despathB], trigger='cron', hour='0-23',id='gen_IONO_CET_ISM_scheduler')          ##每天的00-23小时开始执行
    jobC = scheduler.add_job(
        func=gen_IONO_FDS_ION_scheduler,
        args=[srcpathC, despathC],
        trigger='cron',
        minute='00,15,30,45',
        id='gen_IONO_FDS_ION_scheduler')  ## runs at minutes 00,15,30,45 of every hour
    #jobD = scheduler.add_job(func=gen_IONO_FDS_ISM_scheduler, args=[srcpathD, despathD], trigger='cron', hour='0-23',id='gen_IONO_FDS_ISM_scheduler')##每隔1个小时1次
    #jobE1 = scheduler.add_job(func=gen_SOLAR_FDS_SOT_scheduler, args=[srcpathE, despathE,'CGC'], trigger='cron', hour='14-14',minute='00,30',id='gen_SOLAR_FDS_SOT_scheduler CGC')##每隔30分钟执行1次
    #jobE2 = scheduler.add_job(func=gen_SOLAR_FDS_SOT_scheduler, args=[srcpathE, despathE,'CGQ'], trigger='cron', hour='14-14',minute='05,10,15,20,25,30,35,40,45,50,55',id='gen_SOLAR_FDS_SOT_scheduler CGQ')##每隔5分钟执行1次
    #jobE3 = scheduler.add_job(func=gen_SOLAR_FDS_SOT_scheduler, args=[srcpathE, despathE,'CGS'], trigger='cron', hour='14-14',minute='05,10,15,20,25,30,35,40,45,50,55',id='gen_SOLAR_FDS_SOT_scheduler CGS')##每隔5分钟执行1次
    #jobE4 = scheduler.add_job(func=gen_SOLAR_FDS_SOT_scheduler, args=[srcpathE, despathE,'CHA'], trigger='cron', hour='14-14',minute='05,10,15,20,25,30,35,40,45,50,55',id='gen_SOLAR_FDS_SOT_scheduler CHA')##每隔5分钟执行1次
    jobF = scheduler.add_job(
        func=gen_SOLAR_CMA_SRT_scheduler,
        args=[srcpathF, despathF],
        trigger='cron',
        hour='6-18',
        minute='00,03,06,09,12,15,18,21,24,27,30,33,36,39,42,45,48,51,54,57',
        id='gen_SOLAR_CMA_SRT_scheduler')  ## runs every 3 minutes, except 06:00
    jobG = scheduler.add_job(
        func=gen_SOLAR_FDS_SRT_scheduler,
        args=[srcpathG, despathG],
        trigger='cron',
        hour='0-23',
        minute='00,15,30,45',
        id='gen_SOLAR_FDS_SRT_scheduler')  ## runs at minutes 00,15,30,45 of every hour, except 00:00:00

    job5 = scheduler.add_job(func=copy_modify_yyyymmddhhmm_once,
                             args=[srcpath5, despath5],
                             trigger='cron',
                             minute='00,15,30,45',
                             id='FDS_geomag_FGM15M')  ## runs at minutes 00,15,30,45 of every hour
    job6 = scheduler.add_job(func=copy_modify_yyyymmddhhmm_once,
                             args=[srcpath6, despath6],
                             trigger='cron',
                             minute='00',
                             id='FDS_atmos_MET')  ## runs at minute 00 of every hour
    job7 = scheduler.add_job(func=copy_modify_yyyymmddhhmm_once,
                             args=[srcpath7, despath7],
                             trigger='cron',
                             minute='00,30',
                             id='FDS_atmos_MST')  ## runs at minutes 00,30 of every hour
    job71 = scheduler.add_job(func=copy_modify_yyyymmddhhmm_once,
                              args=[srcpath71, despath71],
                              trigger='cron',
                              minute='00,30',
                              id='FDS_atmos_MST1')  ## runs at minutes 00,30 of every hour
    job8 = scheduler.add_job(
        func=copy_modify_yyyymmddhhmm_once,
        args=[srcpath8, despath8],
        trigger='cron',
        hour='00,03,06,09,12,15,18,21',
        id='FDS_geomag_FGM3H')  ## runs daily at hours 00,03,06,09,12,15,18,21
    job9 = scheduler.add_job(func=copy_modify_yyyymmddhhmm_once_mdp,
                             args=[srcpath9, despath9],
                             trigger='cron',
                             hour='00',
                             id='MDP_atmos_LID')  ## runs daily at 00:00
    job10 = scheduler.add_job(func=copy_modify_yyyymmddhhmm_once_mdp,
                              args=[srcpath10, despath10],
                              trigger='cron',
                              hour='00',
                              id='MDP_geomag_FGM')  ## runs daily at 00:00
    #job10 = scheduler.add_job(func=copy_modify_yyyymmddhhmm_once_mdp, args=[srcpath10, despath10],trigger='cron',minute='05,10,15,20,25,30,35,40,45,50,55', id='MDP_geomag_FGM')  ##每天的00时00分开始执行
    #job11 = scheduler.add_job(func=copy_modify_yyyymmdd_matchHH_scheduler, args=[srcpath11, despath11],trigger='cron',hour='00,12', id='FDS_atmos_cma_upar')  ##每天的00时、12时开始执行
    job11 = scheduler.add_job(func=copy_modify_yyyymmddhhmm_once_mdp,
                              args=[srcpath11, despath11],
                              trigger='cron',
                              hour='00,12',
                              id='FDS_atmos_cma_upar')  ## runs daily at 00:00 and 12:00
    job12 = scheduler.add_job(func=copy_modify_yyyymmdd_matchHH_scheduler,
                              args=[srcpath12, despath12],
                              trigger='cron',
                              hour='00',
                              id='FDS_atmos_FGM24H')  ## runs daily at 00:00
    #job13 = scheduler.add_job(func=copy_modify_yyyymmdd_matchHH_scheduler, args=[srcpath13, despath13],trigger='cron',hour='12', id='FDS_atmos_AFD_UPAR')  ##每天的00时开始执行
    job13 = scheduler.add_job(func=copy_modify_yyyymmddhhmm_once_mdp,
                              args=[srcpath13, despath13],
                              trigger='cron',
                              hour='12',
                              id='FDS_atmos_AFD_UPAR')  ## runs daily at 12:00

    #### Job list
    #print (scheduler.get_jobs())

    #### Push logs to the mailbox
    jobX = scheduler.add_job(
        func=send_mail_segment,
        trigger='cron',
        hour='0,21',
        id='send_mail_segment')  ## pushed once daily at 00:00 and 21:00

    #### Generate IRI grid data
    jobR = scheduler.add_job(func=gen_IRI,
                             trigger='cron',
                             hour='22',
                             id='gen_IRI')  ## runs daily at 22:00

    #### Periodically clean up data older than 4 days
    #### Expiry in days; 4 days is enough to keep the previous 72 hours of data, with one extra day of margin
    expire_day = 7
    cleanpath1 = '/kjtq_data/'
    cleanpath11 = '/kjtq_data/FDS/solar/'
    cleanpath2 = '/kjtq_data/localdatafiles/'
    cleanpath3 = '/kjtq_data/localplugins/IRI'

    #jobY1 = scheduler.add_job(func=clean_dirs, args=[cleanpath1,expire_day], trigger='cron', hour='00',id='/Data/TEST/')                    #每天0点开始清理
    jobY11 = scheduler.add_job(func=clean_dirs,
                               args=[cleanpath11, expire_day],
                               trigger='cron',
                               hour='00',
                               id=cleanpath11)
    jobY2 = scheduler.add_job(func=clean_dirs,
                              args=[cleanpath2, expire_day],
                              trigger='cron',
                              hour='00',
                              id=cleanpath2)  # cleaned daily at 00:00
    #jobY3 = scheduler.add_job(func=clean_dirs, args=[cleanpath3,expire_day], trigger='cron', hour='00',id=cleanpath3)   #每天0点开始清理

    #### Listen for job events
    scheduler.add_listener(listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

    #### Job logging
    logging = log_setting()
    scheduler._logger = logging

    #### Start the scheduler; it can only be started once and must not be restarted
    try:
        print('begin start......')
        ##start() blocks
        scheduler.start()
        print('end start......')
    except Exception as e:
        exit(str(e))
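
A hedged sketch (not in the original) of one way to handle the despath note above: re-derive the destination path from the current date on every run instead of fixing it when the job is added. The wrapper name and path layout are assumptions.

import os
import time


def run_with_daily_despath(task_fn, srcpath, desroot):
    """Hypothetical wrapper: recompute despath from today's date on each run."""
    now = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    yyyymm, yyyymmdd = now[0:6], now[0:8]
    despath = os.path.join(desroot, yyyymm, yyyymmdd) + os.sep
    task_fn(srcpath, despath)

# e.g. scheduler.add_job(func=run_with_daily_despath,
#                        args=[gen_IONO_CET_ION_scheduler, srcpathA,
#                              rootpath + '/kjtq_data/CET/iono/ION/XXXM_ION/'],
#                        trigger='cron', hour='8-18', id='gen_IONO_CET_ION_scheduler')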
Exemple #18
0
        print(event.exception)
    scheduler.remove_all_jobs()
    if 'MACOS' in os.environ and os.environ['MACOS']:
        producer = Producer(server_host='localhost:2181',
                            distributor_kafka_host='localhost:9092')
    else:
        producer = Producer()
    add_job()


if __name__ == '__main__':
    print(sys.argv)
    if 'MACOS' in os.environ and os.environ['MACOS']:
        producer = Producer(server_host='40.253.65.179:2181',
                            distributor_kafka_host='40.253.65.179:9092')
    elif len(sys.argv) > 2:
        producer = Producer(server_host=sys.argv[1],
                            distributor_kafka_host=sys.argv[2])
    else:
        producer = Producer()
    scheduler = BlockingScheduler()
    scheduler.add_listener(
        scheduler_listener,
        EVENT_JOB_MAX_INSTANCES | EVENT_JOB_ERROR | EVENT_JOB_MISSED)
    add_job()

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit) as e:
        scheduler.shutdown()
Exemple #19
0
                  args=[1],
                  name='Crawl the latest comments within 1 day',
                  trigger='interval',
                  minutes=10)

## Crawl the last three days of comments once an hour
scheduler.add_job(commentSpider.crawlLatestComments,
                  args=[3],
                  name='Crawl the latest comments within 3 days',
                  trigger='interval',
                  hours=1)

## Crawl the last week's comments once every night
scheduler.add_job(commentSpider.crawlLatestComments,
                  kwargs={
                      'day': 7,
                      'useThread': False,
                      'crawlAll': True
                  },
                  name='Crawl article comments within 1 week',
                  trigger='cron',
                  hour=5,
                  minute=30)


def errorListener(event):
    ravenClient.capture(event.exception)


scheduler.add_listener(errorListener, mask=apscheduler.events.EVENT_JOB_ERROR)
Exemple #20
0
def my_job2(x):
    print(dt.now().strftime("%Y-%m-%d %H:%M:%S"), x)
    print(1 / 0)


def my_listener(event):
    if event.exception:
        print(type(event))
        print("task error")
    else:
        print(type(event))
        print("task is running")


scheduler = BlockingScheduler()

scheduler.add_job(func=my_job,
                  args=("interval task", ),
                  trigger="interval",
                  seconds=5,
                  id="interval_task")
scheduler.add_job(func=my_job2,
                  args=("once task", ),
                  trigger="date",
                  next_run_time=dt.now() + timedelta(seconds=15),
                  id="once_task")

scheduler.add_listener(my_listener)

scheduler.start()
Exemple #21
0
class NifSync(threading.Thread):
    """Populate and sync change messages from NIF api

    Currently handles following services and respective entity types:

    * SynchronizationService/GetChanges3:
        * Person
        * Function
        * Organization
    * SynchronizationService/GetChangesCompetence2:
        * Competence
    * SynchronizationService/GetChangesLicense:
        * License

    .. danger::
        The club (org_type_id=5) integration users can only use GetChanges3 ( :py:attr:`sync_type` ='changes'). For
        GetChangesCompetence2 ( :py:attr:`sync_type` ='competence') and
        GetChangesLicense ( :py:attr:`sync_type` = 'license') an integration user on federation level is required

    The class will automatically handle when to :py:meth:`populate` and :py:meth:`sync`.

    .. note::
        :py:meth:`_check` is called on init and checks with the api to find last change message for
        :py:attr:`org_id` and the last message is then used as the initial starting point.


    :param org_id: The integration user organization id, required
    :type org_id: int
    :param login: The full path integration username ('app_id/function_id/username'), required
    :type login: str
    :param password: Password, required
    :type password: str
    :param created: A datetime string representing creation date of org_id, required
    :type created: str
    :param stopper: a threading.Event flag to exit
    :type stopper: threading.Event
    :param restart: On True will reset all AppLogger handlers
    :type restart: bool
    :param background: Sets the scheduler. Defaults to False and BlockingScheduler
    :type background: bool
    :param initial_timedelta: The initial timedelta to use from the last change message, in seconds
    :type initial_timedelta: int
    :param overlap_timedelta: An optional timedelta for overlap functions, in hours
    :type overlap_timedelta: int
    :param lock: The semaphore object, if None uses :py:class:`.FakeSemaphore`
    :type lock: threading.BoundedSemaphore
    :param sync_type: The sync type for this user, allowed ``changes``, ``competence`` and ``license``. Defaults to ``changes``.
    :type sync_type: str
    :param sync_interval: The interval for the sync scheduler in minutes. Defaults to NIF_SYNC_INTERVAL
    :type sync_interval: int
    :param populate_interval: The interval for populating, in hours. Defaults to NIF_POPULATE_INTERVAL
    :type populate_interval: int

    Usage - threading::

        from sync import NifSync
        sync = NifSync(org_id, username, password)
        sync.start()  # sync is of threading.Thread

    Usage - blocking::

        from sync import NifSync
        sync = NifSync(org_id, username, password)
        sync.run()  # sync starts without thread running

    Usage - with semaphore::

        import threading
        from sync import NifSync
        bound_semaphore = threading.BoundedSemaphore(value=10)
        sync = NifSync(org_id, username, password, lock=bound_semaphore)
        sync.start()  # sync is of threading.Thread, semaphore has 10 slots

    .. note::
        The usernames and passwords for integration users on club level is stored in integration/users and accessible
        through :py:mod:`
    """
    def __init__(self,
                 org_id,
                 username,
                 password,
                 created,
                 stopper=False,
                 restart=False,
                 background=False,
                 initial_timedelta=0,
                 overlap_timedelta=0,
                 lock=None,
                 sync_type='changes',
                 sync_interval=NIF_CHANGES_SYNC_INTERVAL,
                 populate_interval=NIF_POPULATE_INTERVAL):

        self.state = SyncState()

        # Init thread
        super().__init__(name='klubb-{0}'.format(org_id))

        if sync_type in ['changes', 'license', 'competence', 'federation']:
            self.sync_type = sync_type
        else:
            raise Exception('{} is not a valid sync type'.format(sync_type))

        self.id = org_id
        self.username = username

        self.started = datetime.now()
        self.sync_errors = 0
        # self.sync_errors_max = 3  # Errors in a row!

        self.sync_interval = sync_interval  # minutes
        self.populate_interval = populate_interval  # days

        self.initial_timedelta = initial_timedelta
        self.overlap_timedelta = overlap_timedelta

        self.messages = 0  # Holds number of successfully processed messages

        self.stopper = stopper
        self.background = background

        self.initial_start = None
        self.from_to = [None, None]
        self.sync_started = False

        self.tz_local = tz.gettz(LOCAL_TIMEZONE)
        self.tz_utc = tz.gettz('UTC')

        #  Init logger
        self.log = AppLogger(name='klubb-{0}'.format(org_id),
                             stdout=not background,
                             last_logs=100,
                             restart=restart)

        # No stopper, started directly check for stream resume token!
        if self.stopper is False:
            from pathlib import Path
            resume_token = Path(STREAM_RESUME_TOKEN_FILE)

            if resume_token.is_file() is not True:
                self.log.warning(
                    'No resume token at {}'.format(STREAM_RESUME_TOKEN_FILE))
                self.log.warning(
                    'Requires stream to have or be running and a valid token file'
                )

        if lock is not None and (isinstance(lock, threading.BoundedSemaphore)
                                 or isinstance(lock, threading.Semaphore)):
            self.lock = lock
        else:
            self.lock = FakeSemaphore()  # Be able to run single-threaded as well

        # Lungo REST API
        self.api_integration_url = '%s/integration/changes' % API_URL

        # Make a startup log entry
        self.log.debug('[STARTUP]')
        self.log.debug('Org_id:     {0}'.format(org_id))
        self.log.debug('Login:      {0}'.format(username))
        self.log.debug('Pwd:        {0}'.format(password))
        self.log.debug('Created:    {0}'.format(created))
        self.log.debug('Skew:       {0} seconds'.format(
            self.initial_timedelta))
        self.log.debug('Sync:       {0} minutes'.format(self.sync_interval))
        self.log.debug('Populate:   {0} hours'.format(self.populate_interval))
        self.log.debug('Api url:    {0}'.format(self.api_integration_url))

        # Created
        self.org_created = dateutil.parser.parse(created)
        if self.org_created.tzinfo is None or self.org_created.tzinfo.utcoffset(
                self.org_created) is None:
            """self.org_created is naive, no timezone we assume CET"""
            self.org_created = self.org_created.replace(tzinfo=self.tz_local)

        self.org_id = org_id

        try:
            self.nif = NifApiSynchronization(username,
                                             password,
                                             realm=NIF_REALM,
                                             log_file=SYNC_LOG_FILE,
                                             test_login=False)
        except:
            self.log.exception(
                'Sync client creation for {} failed, terminating'.format(
                    username))
            # sys.exit(0)
            raise Exception('Could not create sync client')

        # Setup job scheduler
        if self.background:
            self.scheduler = BackgroundScheduler()
            self.log.info('Scheduler:  BackgroundScheduler')
        else:
            self.scheduler = BlockingScheduler()
            self.log.info('Scheduler:  BlockingScheduler')

        self.job_misfires = 0
        self.scheduler.add_listener(self._job_fire, EVENT_JOB_EXECUTED)
        self.scheduler.add_listener(self._job_misfire, EVENT_JOB_MISSED)

        self.job = self.scheduler.add_job(self.sync,
                                          'interval',
                                          minutes=self.sync_interval,
                                          max_instances=1)

        self.state.set_state(state='finished')

    def __del__(self):
        """Destructor, shutdown the scheduler on exit"""

        try:
            self.log.debug('Destructor called, terminating thread')
        except:
            pass

        try:
            if self.scheduler.running is True:
                self.scheduler.shutdown(wait=False)
                self.log.debug('Shutting down scheduler')
        except:
            self.log.error('Could not shut down scheduler')
            pass

    @property
    def uptime(self) -> (int, int):
        """Calculate thread uptime

        :returns uptime: integer tuple (days, seconds) since thread start
        """

        t = datetime.now() - self.started
        return t.days, t.seconds

    @property
    def job_next_run_time(self):

        if self.scheduler.state == 1:
            return self.job.next_run_time

        return None

    def job_pause(self):

        if self.scheduler.state == 1:
            self.job.pause()

    def job_resume(self):

        if self.scheduler.state == 1:
            self.job.resume()

    @property
    def scheduler_state(self):

        return self.scheduler.state

    def _job_misfire(self, event) -> None:
        """Callback for a missed job execution. Increases :py:attr:`job_misfires`

        :param event: apcscheduler.Event
        """
        self.job_misfires += 1

    def _job_fire(self, event) -> None:
        """Callback for a successful job execution. Decreases :py:attr:`job_misfires`

        :param event: apcscheduler.Event
        """
        if self.job_misfires > 0:
            self.job_misfires -= 1

    def run(self) -> None:
        """Start the thread, conforms to threading.Thread.start()

        Calls :py:meth:`._check` which determines whether to run :py:meth:`populate` or start a job with target
        :py:meth:`sync`
        """
        self.log.debug('[Starting thread]')
        self._check()

    def _stopper(self, force=False) -> None:
        """If stopper is threading event and is set, then terminate"""

        # Check if too many errors
        if self.sync_errors >= NIF_SYNC_MAX_ERRORS:
            self.state.set_state(mode=self.state.mode,
                                 state='terminated',
                                 reason='too many errors')
            self._shutdown()  # because setting stopper propagates to all!

        if isinstance(self.stopper, threading.Event):

            if self.stopper.is_set():
                self.log.warning('Stopper is set, terminating thread')
                self._shutdown()

        if force is True:
            self.log.warning('Forcing shutdown, terminating thread')
            self._shutdown()

    def _shutdown(self) -> None:
        """Shutdown in an orderly fashion"""

        if self.scheduler.state > 0:
            try:
                self.log.debug('Shutting down scheduler')
                if self.scheduler.running is True:
                    self.scheduler.shutdown(wait=False)  # wait=False
            except Exception as e:
                self.log.exception('Error shutting down scheduler')

        self.log.exception('[TERMINATING]')

        # Terminate instance/thread
        sys.exit(0)

    def _check(self) -> None:
        """Checks to decide to populate or sync on startup

        .. danger::
            On errors from the api, calls :py:meth:`_stopper(force=True)` which will terminate thread.
        """

        self.state.set_state(mode='checking', state='running')
        # @TODO: check if in changes/stream - get last, then use last date retrieved as start_date (-1microsecond)
        changes = requests.get(
            '%s?where={"_org_id":%s, "_realm":"%s"}&sort=[("sequence_ordinal", -1)]&max_results=1'
            % (self.api_integration_url, self.org_id, NIF_REALM),
            headers=API_HEADERS)

        if changes.status_code == 404:
            # populate, should not happen!
            # self.populate()
            self.log.error('404 from {0}, terminating'.format(
                self.api_integration_url))
            self._stopper(force=True)

        elif changes.status_code == 200:

            r = changes.json()
            c = r['_items']

            if len(c) == 0:
                self.log.debug('No change records, populating')
                self.populate()

            elif len(c) == 1:
                # Check date then decide to populate or not!
                self.log.debug('Got last change records, checking times')

                sequential_ordinal = dateutil.parser.parse(
                    c[0]['sequence_ordinal']).replace(tzinfo=self.tz_utc)

                self.log.debug('Last change message recorded {0}'.format(
                    sequential_ordinal.astimezone(self.tz_local).isoformat()))

                self.initial_start = sequential_ordinal + timedelta(
                    seconds=self.initial_timedelta) - timedelta(
                        hours=self.overlap_timedelta)

                if self.initial_start.tzinfo is None or self.initial_start.tzinfo.utcoffset(
                        self.initial_start) is None:
                    self.initial_start = self.initial_start.replace(
                        tzinfo=self.tz_local)

                if self.initial_start < datetime.utcnow().replace(
                        tzinfo=self.tz_utc) - timedelta(
                            hours=self.populate_interval):
                    """Older than the populate interval"""
                    self.log.debug('More than {} hours, populating'.format(
                        self.populate_interval))
                    self.populate()
                    self.state.set_state(mode='populate', state='initialized')
                else:
                    self.log.debug('Less than {} hours, syncing'.format(
                        self.populate_interval))
                    self.job.modify(next_run_time=datetime.now())
                    self.log.debug('Told job to start immediately')
                    self.log.debug('Starting sync scheduler')
                    self.scheduler.start()
                    self.state.set_state(mode='sync', state='started')

        else:
            self.log.error('{0} from {1}, terminating'.format(
                changes.status_code, self.api_integration_url))
            sys.exit()

    def _eve_fix_sync(self, o) -> dict:
        """Just make soap response simpler

        .. danger::
            Deferred. Uses :py:class:`typings.changes.Changes` instead.
        """

        if o['Changes'] is None:
            o['Changes'] = []
        elif 'ChangeInfo' in o['Changes']:
            o['Changes'] = o['Changes']['ChangeInfo']

            for key, value in enumerate(o['Changes']):
                if o['Changes'][key]['MergeResultOf'] is None:
                    o['Changes'][key]['MergeResultOf'] = []
                elif 'int' in o['Changes'][key]['MergeResultOf']:
                    o['Changes'][key]['MergeResultOf'] = o['Changes'][key][
                        'MergeResultOf']['int']
                else:
                    o['Changes'][key]['MergeResultOf'] = []

        o['_org_id'] = self.org_id

        return o

    def _update_changes(self, changes) -> None:
        """Update change message

        .. note::
            Creates a custom unique '_ordinal' for each change message before trying to insert into api. The purpose
            is to let it gracefully fail with a http 422 if the change message already exists in the api::

                sha224(bytearray(entity_type, id, sequence_ordinal, org_id))

        :param changes: list of change messages
        :type changes: :py:class:`typings.changes.Changes`
        """

        for v in changes:
            v['_ordinal'] = hashlib.sha224(
                bytearray(
                    "%s%s%s%s" % (v['entity_type'], v['id'],
                                  v['sequence_ordinal'], self.org_id),
                    'utf-8')).hexdigest()
            # bytearray("%s%s%s%s" % (self.org_id, v['EntityType'], v['Id'], v['sequence_ordinal']), 'utf-8')).hexdigest()

            v['_status'] = 'ready'  # ready -> running -> finished
            v['_org_id'] = self.org_id
            v['_realm'] = NIF_REALM

            r = requests.post(self.api_integration_url,
                              data=json.dumps(v, cls=EveJSONEncoder),
                              headers=API_HEADERS)

            if r.status_code == 201:
                self.log.debug(
                    'Created change message for {0} with id {1}'.format(
                        v['entity_type'], v['id']))
                self.messages += 1

            elif r.status_code == 422:
                self.log.debug('422 {0} with id {1} already exists'.format(
                    v['entity_type'], v['id']))
            else:
                self.log.error(
                    '{0} - Could not create change message for {1} with id {2}'
                    .format(r.status_code, v['entity_type'], v['id']))
                self.log.error(r.text)

    def _get_change_messages(self, start_date, end_date, resource) -> None:
        """Use NIF GetChanges3"""

        # To avoid future date?
        time.sleep(NIF_SYNC_DELAY)

        if resource == 'changes':
            status, changes = self.nif.get_changes(
                start_date.astimezone(self.tz_local),
                end_date.astimezone(self.tz_local))
        elif resource == 'competence':
            status, changes = self.nif.get_changes_competence(
                start_date.astimezone(self.tz_local),
                end_date.astimezone(self.tz_local))
        elif resource == 'license':
            status, changes = self.nif.get_changes_license(
                start_date.astimezone(self.tz_local),
                end_date.astimezone(self.tz_local))
        elif resource == 'federation':
            status, changes = self.nif.get_changes_federation(
                start_date.astimezone(self.tz_local),
                end_date.astimezone(self.tz_local))
        else:
            raise Exception('Resource gone bad, {}'.format(resource))

        if status is True:

            self.log.debug('Got {} changes for {}'.format(
                len(changes), resource))
            if len(changes) > 0:
                self._update_changes(changes)
            else:
                self.log.debug('Empty change messages list')
            """    
            try:
                self.log.debug('Got {} changes for {}'.format(len(changes), resource))
                if len(changes) > 0:
                    self._update_changes(changes)
            except TypeError:
                self.log.debug('Empty change messages list (_get_changes3)')
            except Exception as e:
                self.log.exception('Unknown exception (_get_changes3)')
            """

        else:
            self.log.error('GetChanges returned error: {0} - {1}'.format(
                changes.get('code', 0), changes.get('error', 'Unknown error')))
            raise Exception('_get_changes_messages returned an error')

    def _get_changes(self, start_date, end_date) -> None:
        """Get change messages based on :py:attr:`.sync_type`"""

        self.from_to = [start_date, end_date]  # Adding extra info

        try:
            self._get_change_messages(start_date, end_date, self.sync_type)

            if self.sync_errors > 0:
                self.sync_errors -= 1

            return True

        except requests.exceptions.ConnectionError:
            self.sync_errors += 1
            self.log.error('Connection error in _get_changes')

            # Retry @TODO see if retry should be in populate and sync
            if NIF_SYNC_MAX_ERRORS >= self.sync_errors:
                time.sleep(3 * self.sync_errors)
                self._get_changes(start_date, end_date)
        except TypeError:
            self.log.debug('TypeError: Empty change messages list ({})'.format(
                self.sync_type))
        except Exception as e:
            self.sync_errors += 1
            self.log.exception('Exception in _get_changes')
            # @TODO Need to verify if this is reason to warn somehow??

        return False

    def sync(self) -> None:
        """This method is the job run by the scheduler when last change message is < NIF_POPULATE_INTERVAL.

        If a job misfires, then on next run the interval to sync will be twice.

        .. note::
            Checks if :py:attr:`.sync_errors` > :py:attr:`.sync_errors_max` and if so it will set :py:attr:`._stopper`
            for this thread and will run :py:meth:`._stopper` as it always checks, which in turn calls
            :py:meth:`._shutdown` and terminates the thread.
        """

        self.state.set_state(mode='sync', state='running')

        # Check if stopper is set
        self._stopper()

        self.log.debug('Getting sync messages')

        if self.initial_start is not None:
            end = datetime.utcnow().replace(tzinfo=self.tz_utc)
            start = self.initial_start + timedelta(
                seconds=self.initial_timedelta)

            self.log.debug('From:   {0}'.format(
                start.astimezone(self.tz_local).isoformat()))
            self.log.debug('To:     {0}'.format(
                end.astimezone(self.tz_local).isoformat()))

            if end > start:
                if self._get_changes(start, end):
                    self.initial_start = end
            else:
                self.log.error('Inconsistency between dates')
        else:
            end = datetime.utcnow().replace(tzinfo=self.tz_utc)
            self.initial_start = end - timedelta(minutes=self.sync_interval)

            self.log.debug('From:   {0}'.format(
                self.initial_start.astimezone(self.tz_local).isoformat()))
            self.log.debug('To:     {0}'.format(
                end.astimezone(self.tz_local).isoformat()))

            if end > self.initial_start:
                if self._get_changes(self.initial_start, end):
                    self.initial_start = end
            else:
                self.log.error('Inconsistency between dates')

        self.state.set_state(mode='sync', state='sleeping')

    def populate(self, sync_after=True) -> None:
        """Populates change messages from :py:attr:`.org_created` until last change message timedelta is less than
        :py:attr:`.populate_interval` from which it will exit and start :py:attr:`scheduler`.

        .. attention::
            :py:meth:`populate` requires a slot in the connectionpool. Getting a slot requires acquiring
            :py:attr:`lock`. Number of slots available is set in :py:mod:`syncdaemon` on startup.
        """
        self.state.set_state(mode='populate', state='initializing')
        self.log.debug('Populate, interval of {0} hours...'.format(
            self.populate_interval))

        # Initial
        if self.initial_start is None:
            end_date = self.org_created
        else:
            end_date = self.initial_start

        start_date = end_date - timedelta(hours=self.populate_interval)

        # Populate loop
        while end_date < datetime.utcnow().replace(
                tzinfo=self.tz_utc) + timedelta(hours=self.populate_interval):

            # Check stopper
            self._stopper()

            # Aquire lock and run!
            self.log.debug('Waiting for slot in connectionpool...')
            self.state.set_state(state='waiting', reason='connection pool')

            with self.lock:  # .acquire(blocking=True):
                self.state.set_state(state='running')
                # Check stopper, might have waited long time
                self._stopper()

                # Overlapping end date
                if end_date > datetime.utcnow().replace(tzinfo=self.tz_utc):
                    end_date = datetime.utcnow().replace(tzinfo=self.tz_utc)

                    self.log.debug(
                        'Getting last changes between {0} and {1}'.format(
                            start_date.astimezone(self.tz_local).isoformat(),
                            end_date.astimezone(self.tz_local).isoformat()))

                    if self._get_changes(start_date, end_date) is True:
                        # Last populate
                        break  # Break while

                else:
                    self.log.debug(
                        'Getting changes between {0} and {1}'.format(
                            start_date.astimezone(self.tz_local).isoformat(),
                            end_date.astimezone(self.tz_local).isoformat()))

                    if self._get_changes(start_date, end_date) is True:
                        # Next iteration
                        start_date = end_date
                        end_date = end_date + timedelta(
                            hours=self.populate_interval)

                    time.sleep(0.1)  # Grace before we release lock

        # The assignment in the last loop iteration does not carry over, so use start_date from the final iteration
        self.initial_start = start_date
        self.state.set_state(mode='populate',
                             state='finished',
                             reason='ended populate')

        if sync_after is True:
            self.log.debug('Starting sync scheduler...')
            self.scheduler.start()
            self.state.set_state(mode='sync',
                                 state='started',
                                 reason='starting after populate')

@scheduler.scheduled_job(trigger='cron',
                         id='task_run_dbIPCheck',
                         minute='*/10')
def task_run_dbIPCheck():
    '''On Windows the function is not executed because the grequests library pulls in gevent; it works normally on Linux'''
    cmd = 'python {}'.format(str(os.path.join(curre_path, 'run_dbCheck.py')))
    print('-----IP check--------', cmd)
    subprocess.Popen(cmd, shell=True)


# Listener
def listen_task(event):
    if event.exception:
        print('**********Job error*********')
    else:
        print('**********Job executed*********')


scheduler.add_listener(listen_task, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

scheduler.start()
'''

step-1: edit the MySQL connection in model.py
step-2: nohup python -u tasks_apscheduler.py &


'''
Exemple #23
0
    logger.info("Waiting for jobs to finish normally; do not force-quit, to avoid data loss!!!")
    tool.settingConf.save()
    job.shutdown()
    sys.exit(0)


def handleException(exp):
    logger = tool.getLogger()
    logger.error("", exc_info=True)


# signal.signal(signal.SIGINT, exits)
# signal.signal(signal.SIGTERM, exits)

# Capture failure records to make debugging easier
job.add_listener(handleException, events.EVENT_JOB_ERROR)

# Scheduled job for fetching videos
job.add_job(main_upload.jobProducer,
            **tool.settingConf["Scheduler"]["Video"],
            next_run_time=datetime.datetime.now())

# Periodically verify that the video consumer thread is still running
job.add_job(main_upload.jobConsumer,
            trigger="interval",
            minutes=2,
            next_run_time=datetime.datetime.now())

# The subtitle scheduled job is not loaded for now
# job.add_job(main_sub.run, **tool.settingConf["Scheduler"]["Subtitle"])
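
A hedged illustration (an assumption, not taken from the original project) of what the Scheduler section of the settings might look like for the **tool.settingConf[...] unpacking above; the keys map directly onto add_job() keyword arguments.

settingConf = {
    "Scheduler": {
        "Video": {"trigger": "cron", "hour": "3", "minute": "0"},
        "Subtitle": {"trigger": "interval", "hours": 6},
    }
}
# job.add_job(main_upload.jobProducer, **settingConf["Scheduler"]["Video"])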
Exemple #24
0
    # Copy to PROD
    print('Copy to PROD')
    firebase.delete('/PROD', None)
    stgResults = firebase.get(stgTable, None)
    firebase.patch('/PROD', stgResults)
    print('Done')


sched = BlockingScheduler()
logging.basicConfig()

main()


def err_listener(event):
    print('%s' % (event))


sched.add_listener(
    err_listener, events.EVENT_SCHEDULER_START | events.EVENT_JOB_ERROR | events.EVENT_JOB_MISSED)


@sched.scheduled_job('interval', minutes=5)
def timed_job():
    try:
        main()
    except Exception as e:
        print(e)

sched.start()
Exemple #25
0
        logging.warning('Monitor interrupted, restarting..')
        scheduler.remove_all_jobs()
        getAuth()
        scheduler.add_job(getAuth, 'interval', hours=6)
        scheduler.add_job(getData,
                          'interval',
                          seconds=2,
                          max_instances=5,
                          args=[token])
        logging.warning('Monitor Restarted.')


if __name__ == '__main__':
    logging.warning('Monitor started.')
    scheduler = BlockingScheduler()
    getAuth()
    scheduler.add_job(getAuth, 'interval', hours=6)
    scheduler.add_job(getData,
                      'interval',
                      seconds=2,
                      max_instances=5,
                      args=[token])
    scheduler.add_listener(listener, EVENT_JOB_ERROR)
    scheduler.start()
    try:
        while True:
            continue
    except (KeyboardInterrupt, SystemExit):
        logging.warning('Monitor forced stopped.')
        scheduler.shutdown(wait=False)
Exemple #26
0
# end_date=None,timezone=None)
# hour = 19, minute = 23
# hour = '19', minute = '23'
# minute = '*/5' means run once every 5 minutes
# hour = '19-21', minute='23' means run once each at 19:23, 20:23 and 21:23
# scheduler.add_job(func=qyt_print, args=['test1', 'test2'],
#                   trigger='cron', hour=10, minute=14, id='cron schedule: test normal print')
# date: run only once, at a specific point in time; run_date(datetime|str)
# scheduler.add_job(func=qyt_print, args=['test1', 'test2'],
#                   trigger='date', run_date=datetime(2019, 3, 26, 10, 17),id='date schedule: test normal print')

# interval: run repeatedly at a fixed interval; weeks=0 | days=0 | hours=0 | minutes=0 | seconds=0, start_date=None,
# end_date=None, timezone=None
scheduler.add_job(func=write_config_md5_to_db,
                  args=(),
                  trigger='interval',
                  minutes=1,
                  start_date=datetime(2019, 7, 25, 9, 35),
                  end_date=datetime(2019, 7, 25, 10, 3),
                  id='interval job: fetch config and write it to the database')

# Register the event listener
scheduler.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
# Attach the logger
scheduler._logger = logging
# Start scheduling
try:
    scheduler.start()
except KeyboardInterrupt:
    print('Received stop command; exiting!')
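
# For quick reference, a self-contained sketch of the three trigger types
# described in the comments above (tick() and the dates are placeholders):
#
#     from datetime import datetime
#     from apscheduler.schedulers.blocking import BlockingScheduler
#
#     def tick():
#         print('tick', datetime.now())
#
#     demo = BlockingScheduler()
#     # cron: minute 23 of hours 19-21 -> fires at 19:23, 20:23 and 21:23
#     demo.add_job(tick, trigger='cron', hour='19-21', minute='23')
#     # date: fires exactly once at the given point in time
#     demo.add_job(tick, trigger='date', run_date=datetime(2019, 7, 25, 10, 0))
#     # interval: fires every 5 minutes
#     demo.add_job(tick, trigger='interval', minutes=5)
#     demo.start()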
Exemple #27
0
class ApsPlanner(BasePlanner):
    """
    Planner implementing scheduling using the |APS|_. Scheduling sets the :any:`APS Job <apscheduler.job.Job>` as links' job.

    .. |APS| replace:: Advanced Python Scheduler
    .. _APS: https://apscheduler.readthedocs.io/en/stable/index.html
    .. _configuring-scheduler: https://apscheduler.readthedocs.io/en/stable/userguide.html#configuring-the-scheduler

    """
    def __init__(self,
                 links: Union[Link, List[Link]] = None,
                 threads: int = 30,
                 executors_override: dict = None,
                 job_defaults_override: dict = None,
                 ignore_exceptions: bool = False,
                 catch_exceptions: bool = None,
                 immediate_transfer: bool = True):
        """

        :type links: :any:`Link` or list[:any:`Link`]
        :param links: Links that should be added and scheduled.
            |default| :code:`None`

        :type threads: int
        :param threads: Number of threads available for job execution. Each link will be run on a separate thread job.
            |default| :code:`30`

        :type executors_override: dict
        :param executors_override: Overrides for executors option of `APS configuration <configuring-scheduler_>`__
            |default| :code:`None`

        :type job_defaults_override: dict
        :param job_defaults_override: Overrides for job_defaults option of `APS configuration <configuring-scheduler_>`__
            |default| :code:`None`

        :type ignore_exceptions: bool
        :param ignore_exceptions: Whether exceptions should be ignored or halt the planner.
            |default| :code:`False`

        :type immediate_transfer: :class:`bool`
        :param immediate_transfer: Whether planner should execute one transfer immediately upon starting. |default| :code:`True`
        """

        self._threads = threads

        if executors_override is None:
            executors_override = {}
        if job_defaults_override is None:
            job_defaults_override = {}

        executors = {
            'default': ThreadPoolExecutor(threads),
            **executors_override
        }
        job_defaults = {
            'coalesce': False,
            'max_instances': threads,
            **job_defaults_override
        }

        self._scheduler = BlockingScheduler(executors=executors,
                                            job_defaults=job_defaults,
                                            timezone='UTC')
        # self._scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults, timezone=utc)
        self._scheduler.add_listener(self._exception_listener, EVENT_JOB_ERROR)

        self.links_by_jobid = {}

        super().__init__(links=links,
                         ignore_exceptions=ignore_exceptions,
                         immediate_transfer=immediate_transfer)

        if catch_exceptions is not None:  # pragma: no cover
            self._ignore_exceptions = catch_exceptions
            warnings.warn(
                '\'catch_exceptions\' was renamed to \'ignore_exceptions\' in version 0.2.0 and will be permanently changed in version 1.0.0',
                DeprecationWarning)

    def _exception_listener(self, event):
        if event.code == EVENT_JOB_ERROR:  # compare by value, not identity
            self._on_exception(event.exception,
                               self.links_by_jobid[event.job_id])

    def _schedule(self, link: Link):
        """
        Schedule a link. Sets :any:`APS Job <apscheduler.job.Job>` as this link's job.

        :type link: :any:`Link`
        :param link: Link to be scheduled
        """

        job = self._scheduler.add_job(
            link.transfer,
            trigger=IntervalTrigger(seconds=link.interval.total_seconds()))
        link.set_job(job)
        self.links_by_jobid[job.id] = link

    def _unschedule(self, link: Link):
        """
        Unschedule a link.

        :type link: :any:`Link`
        :param link: Link to be unscheduled
        """
        if link.job is not None:
            link.job.remove()
            self.links_by_jobid.pop(link.job.id, None)
            link.set_job(None)

    def start(self):
        """
        Start this planner. Calls :any:`APS Scheduler.start() <apscheduler.schedulers.base.BaseScheduler.start>`

        See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.
        """
        super().start()

    def _start_planner(self):
        self._scheduler.start()

    def pause(self):
        """
        Pause this planner. Calls :any:`APScheduler.pause() <apscheduler.schedulers.base.BaseScheduler.pause>`
        """
        _LOGGER.info('Pausing %s' % str(self))
        self._scheduler.pause()

    def resume(self):
        """
        Resume this planner. Calls :any:`APScheduler.resume() <apscheduler.schedulers.base.BaseScheduler.resume>`
        """
        _LOGGER.info('Resuming %s' % str(self))
        self._scheduler.resume()

    def shutdown(self, wait: bool = True):
        """
        Shutdown this planner. Calls :any:`APScheduler.shutdown() <apscheduler.schedulers.base.BaseScheduler.shutdown>`

        See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.

        :type wait: bool
        :param wait: Whether to wait until all currently executing jobs have finished.
            |default| :code:`True`
        """
        super().shutdown(wait)

    def _shutdown_planner(self, wait: bool = True):
        """
        Shutdown this planner. Calls :any:`APScheduler.shutdown() <apscheduler.schedulers.base.BaseScheduler.shutdown>`

        :type wait: bool
        :param wait: Whether to wait until all currently executing jobs have finished.
            |default| :code:`True`
        """
        self._scheduler.shutdown(wait=wait)

    def purge(self):
        """
        Unschedule and clear all links. It can be used while planner is running. APS automatically removes jobs, so we only clear the links.
        """
        for link in self.links:
            self.links_by_jobid.pop(link.job.id, None)
            try:
                link.job.remove()
            except JobLookupError:
                pass  # APS already removed jobs if shutdown was called before purge, otherwise let's do it ourselves
            link.set_job(None)

        self._links = []

    @property
    def running(self):
        """
        Whether this planner is currently running. Changed by calls to :any:`start` and :any:`shutdown`.


        :return: State of this planner
        :rtype: bool
        """
        return self._scheduler.state == STATE_RUNNING

    def __repr__(self):
        return 'ApsPlanner(threads:%s)' % (self._threads)
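
# A hedged usage sketch of the planner above. How a Link is constructed is not
# shown in this snippet, so make_link() below is a placeholder; the planner
# only relies on link.transfer, link.interval and link.set_job().
#
#     from datetime import timedelta
#
#     link = make_link(interval=timedelta(minutes=5))   # hypothetical factory
#     planner = ApsPlanner(links=[link],
#                          threads=10,
#                          job_defaults_override={'coalesce': True},
#                          ignore_exceptions=True)
#     planner.start()       # blocks: the planner wraps a BlockingScheduler
#     # planner.shutdown(wait=True)  # call from another thread / signal handler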
Exemple #28
0
def validate_possible_block(possible_block_dict):

  possible_block = Block(possible_block_dict)
  if possible_block.is_valid():
    possible_block.self_save()

    #we want to kill and restart the mining block so it knows it lost
    sched.print_jobs()
    try:
      sched.remove_job('mining')
      print("removed running mine job in validating possible block")
    except apscheduler.jobstores.base.JobLookupError:
      print("mining job didn't exist when validating possible block")

    print("readding mine for block validating_possible_block")
    print(sched)
    print(sched.get_jobs())
    sched.add_job(mine_for_block, kwargs={'rounds': STANDARD_ROUNDS, 'start_nonce': 0}, id='mining')  # add the mining job again
    print(sched.get_jobs())

    return True
  return False


if __name__ == '__main__':

  sched.add_job(mine_for_block, kwargs={'rounds': STANDARD_ROUNDS, 'start_nonce': 0}, id='mining')  # add the mining job
  sched.add_listener(mine_for_block_listener, apscheduler.events.EVENT_JOB_EXECUTED)
  sched.start()

Exemple #29
0
class GetEpidemicData(object):
    """docstring for GetEpidemicData"""
    def __init__(self, np, url):
        self.conn = cx.connect(np)
        self.cur = self.conn.cursor()
        self.url = url
        self.logger = make_log('us_epidemic', 'github_epidemic')

        # Create the scheduler: BlockingScheduler
        self.scheduler = BlockingScheduler()
        # Proxy
        self.proxy = 'zzy:[email protected]:80'
        self.proxy_handler = {
            'http': 'http://' + self.proxy,
            'https': 'https://' + self.proxy
        }

    # Without a proxy
    def get_data(self):
        firefox_headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'
        }
        # response = urlopen(url=url, timeout=60)
        request = Request(self.url, headers=firefox_headers)
        response = urlopen(request)
        # Decode the bytes to a string
        content = response.read().decode().replace(u'\xf1',
                                                   u'').replace(u"'", u"")
        return content

    # With a proxy
    def get_data_proxy(self):
        firefox_headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'
        }
        request = Request(self.url, headers=firefox_headers)
        # Create a ProxyHandler object
        proxy_support = urllib.request.ProxyHandler(self.proxy_handler)
        # Create an opener object
        opener = urllib.request.build_opener(proxy_support)
        # Install the opener for the request
        urllib.request.install_opener(opener)
        response = urlopen(request)
        # Decode the bytes to a string
        content = response.read().decode().replace(u'\xf1',
                                                   u'').replace(u"'", u"")
        return content

    def parse_data(self):
        self.logger.info(' --- database connected --- ')
        try:
            # Get the latest sync number so far
            s_sql = '''SELECT MAX(SYNC_NUMBER) FROM BASE_00070_NCOV_US'''
            self.cur.execute(s_sql)
            max_sn = self.cur.fetchone()[0]
            if max_sn is None:
                max_sn = 1
            else:
                max_sn += 1

            us_data = self.get_data_proxy()

            data_list = us_data.split('"')

            for i in range(len(data_list)):
                data = data_list[i].split(',')
                if 'US' in data:
                    if len(data) == 23:
                        area = data[12]
                        province_state = data[13]
                        country_region = data[14]
                        sys_date = data[15]
                        confirm = int(data[18])
                        dead = int(data[19])
                        heal = int(data[20])
                        active = int(data[21])

                    elif len(data) == 12:
                        area = data[1]
                        province_state = data[2]
                        country_region = data[3]
                        sys_date = data[4]
                        confirm = int(data[7])
                        dead = int(data[8])
                        heal = int(data[9])
                        active = int(data[10])

                    else:
                        continue

                    if confirm == 0:
                        heal_rate = '%.2f' % 0.00
                        dead_rate = '%.2f' % 0.00
                    else:
                        heal_rate = '%.2f' % ((heal / confirm) * 100)
                        dead_rate = '%.2f' % ((dead / confirm) * 100)
                    # print(area, province_state, country_region, sys_date, confirm, dead, heal, active, heal_rate, dead_rate)

                    sql = '''
                        INSERT INTO BASE_00070_NCOV_US(AREA, PROVINCE_STATE, COUNTRY_REGION, CONFIRM_VALUE, ACTIVE, DEAD_VALUE, HEAL_VALUE, DEAD_RATE, HEAL_RATE, SYNC_NUMBER, SYNC_TIME)
                        VALUES('{0}', '{1}', '{2}', {3}, {4}, {5}, {6}, {7}, {8}, {9}, TO_DATE('{10}','YYYY-MM-DD HH24:MI:SS'))
                    '''.format(area, province_state, country_region, confirm,
                               active, dead, heal, dead_rate, heal_rate,
                               max_sn, sys_date)

                    self.logger.info(sql)

                    self.cur.execute(sql)

                else:
                    continue

            # Commit
            self.conn.commit()

        except Exception as e:
            self.logger.info(' --- operation failed --- ')
            self.logger.error(str(e))
            self.conn.rollback()

        finally:
            # Close resources
            self.cur.close()
            self.conn.close()
            self.logger.info(' --- database closed --- ')

    def create_scheduler(self):
        try:
            self.parse_data()

            self.logger.info(' --- scheduled jobs starting ---')

            # 'id' is an argument of add_job(), not of IntervalTrigger
            self.scheduler.add_job(func=self.get_data_proxy,
                                   trigger=IntervalTrigger(seconds=10),
                                   id='yq_fetch_job')

            # Add the parse job on the same interval
            self.scheduler.add_job(func=self.parse_data,
                                   trigger=IntervalTrigger(seconds=10),
                                   id='yq_parse_job')
            # self.scheduler.add_job(func=self.parse_data, trigger=CronTrigger(hour=8))

            # Add the listener
            self.scheduler.add_listener(self.scheduler_listener,
                                        EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

            # Start the scheduler
            self.scheduler.start()

        except Exception as e:
            self.logger.info(' --- scheduled jobs failed ---')
            self.logger.error(str(e))

    def remove_scheduler(self, id):
        self.scheduler.remove_job(id)

    def scheduler_listener(self, event):
        print(event)
        if event.exception:
            print('The job raised an error!!!')
        else:
            print('The job is running normally...')
                      arrow.get(job_.next_run_time).humanize(locale='zh'))
        if job_.id == 'resume_torrent':
            print(arrow.now().format(), 'next resume_torrent time:',
                  arrow.get(job_.next_run_time).humanize(locale='zh'))


# cron_bilibili_take_in = CronTrigger(hour='5')
cron_search_flv_transform = CronTrigger(hour='4')
del_resume_torrent_counter = Counter()
corn_print_job = CronTrigger(hour='*/3')
corn_del_empty_folder = CronTrigger(minute='*/5')
corn_del_resume_torrent = IntervalTrigger(
    minutes=del_resume_torrent_counter.interval_minutes)

scheduler = BlockingScheduler()
scheduler.add_listener(runtime_listener,
                       EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED)
# scheduler.add_job(daily_take_in, cron_bilibili_take_in, [r"C:\LiveRecord\22128636", 'metadata'], coalesce=True, misfire_grace_time=60, id='bilibili_take_in')
scheduler.add_job(search_flv_transform,
                  cron_search_flv_transform, [
                      "//192.168.123.44/LiveRecord/22128636-OakNose",
                      "//192.168.123.44/LiveRecord/7969549-暂停实验室"
                  ],
                  coalesce=True,
                  misfire_grace_time=60)
scheduler.add_job(print_job,
                  corn_print_job, (scheduler, ),
                  coalesce=True,
                  misfire_grace_time=60)
# scheduler.add_job(del_empty_folder, corn_del_empty_folder, [r"C:\btdownload"], misfire_grace_time=5)
# scheduler.add_job(resume_torrent, corn_del_resume_torrent, [scheduler, del_resume_torrent_counter], misfire_grace_time=10, id='resume_torrent')
Exemple #31
0
class ScrapeScheduler(Executor):
    def __init__(self):
        Executor.register(ScrapeScheduler)
        self._logger = logging.getLogger(type(self).__name__)
        self._scheduler = None
        self._executor = None
        self._job_list = list()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._executor.shutdown()

    def register_cache(self, cache_name):
        self._cache.register_catelog(cache_name)

    def register_job(self, job: BaseJob):
        '''
        Default to run every minute
        :param job:
        :return:
        '''
        self._job_list.append(job)

    def _create_executors(self):
        max_instances = len(
            [job for job in self._job_list if job.schedule_type != 'one-off'])
        max_instances = LogicUtil.if_else_default(max_instances, 1,
                                                  lambda x: x > 0)
        job_defaults = {'max_instances': max_instances}
        self._scheduler = BlockingScheduler(job_defaults=job_defaults)
        self._executor = TaskExecutor()

    def _add_hooks(self):
        _self = self

        def shutdown_hook(event):
            e = event.exception
            if e:
                _self._logger.error(
                    f'{Formatter.get_timestamp()} - Scheduler crashed!, {type(e)} - {e}'
                )
                if isinstance(e, KeyboardInterrupt):
                    if None is not _self._scheduler:
                        _self._scheduler.remove_all_jobs()
                        _self._scheduler.shutdown()
                    if None is not _self._executor:
                        _self._executor.shutdown()

        self._scheduler.add_listener(shutdown_hook, EVENT_JOB_ERROR)

    def _add_registered_jobs(self):
        for job in self._job_list:
            if job.schedule_type == "sec":
                self._scheduler.add_job(job.run,
                                        'interval',
                                        id=type(job).__name__,
                                        seconds=job.sec)
            elif job.schedule_type == "cron":
                self._scheduler.add_job(job.run,
                                        CronTrigger.from_crontab(job.cron),
                                        id=type(job).__name__)
            elif job.schedule_type == "one-off":
                self._scheduler.add_job(job.run, id=type(job).__name__)

    def start(self):
        # Lazy init
        self._create_executors()
        self._add_hooks()
        self._add_registered_jobs()

        self._scheduler.start()

    def shutdown(self):
        self._scheduler.shutdown()
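
# A hedged usage sketch. Based on how _add_registered_jobs() reads job objects,
# a registered job needs run(), a schedule_type of 'sec', 'cron' or 'one-off',
# and a matching 'sec' or 'cron' attribute; BaseJob's real interface may differ.
#
#     class EveryMinuteJob(BaseJob):
#         schedule_type = 'sec'
#         sec = 60
#
#         def run(self):
#             print('scraping...')
#
#     with ScrapeScheduler() as scheduler:
#         scheduler.register_job(EveryMinuteJob())
#         scheduler.start()   # blocks until the scheduler shuts down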