def _proxy_start_active_mode(self):
    if self.link:
        try:
            if self._proxy_sid is None:  # have not started subscription service yet
                if self.proxy_direction == GET_ONLY:
                    self._proxy_active_source = self._proxy_linked_node()
                    if self._proxy_active_source is None:
                        raise ENotStarted()
                    self._proxy_active_destination = self
                else:  # SET_ONLY
                    self._proxy_active_source = self
                    self._proxy_active_destination = self._proxy_linked_node()
                    if self._proxy_active_destination is None:
                        raise ENotStarted()
                self._proxy_active_queue = Queue()
                self._proxy_sid = SM.create_delivered(self, {1: self._proxy_active_source})
                if self.debug:
                    print 'Active proxy %s started successfully' % (self.name)
        except:  # it didn't work; set up a schedule to try again in a few seconds
            if self._retry_win_high < 90:
                self._retry_win_high += 1
            retry_in = randint(int(self._retry_win_high * .66), self._retry_win_high)
            scheduler.seconds_from_now_do(retry_in, self._proxy_start_active_mode)
            # raise  # removed: it mostly just forced the scheduler thread to restart
            if self.debug:
                msglog.exception()
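# The method above keeps retrying itself with a growing, jittered delay until
# the subscription is established.  A condensed sketch of that retry idiom
# follows; it assumes only the scheduler.seconds_from_now_do(delay, callback)
# call and the randint usage seen above -- the RetryingStarter class and its
# _connect method are hypothetical illustrations, not part of the framework.
from random import randint

class RetryingStarter(object):
    def __init__(self):
        self._retry_win_high = 10  # current upper bound of the retry window, in seconds

    def start(self):
        try:
            self._connect()  # hypothetical operation that may not be ready yet
        except:
            # widen the window (capped at 90 s) and pick a random delay inside it,
            # so many nodes retrying at once do not all fire at the same moment
            if self._retry_win_high < 90:
                self._retry_win_high += 1
            retry_in = randint(int(self._retry_win_high * .66), self._retry_win_high)
            # 'scheduler' is the same shared scheduler instance used throughout this file
            scheduler.seconds_from_now_do(retry_in, self.start)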
def __init__(self, refresh_rate=900.0):
    ImmortalThread.__init__(self, name='RAFD')
    if debug:
        print 'init RAFD Server'
    self.semaphore = Semaphore(0)
    self._scheduler = scheduler
    self.refresh_rate = refresh_rate
    scheduler.seconds_from_now_do(30.0, self._tick)
def proxy_active_set(self, dummy):
    #print 'active proxy event'
    self._proxy_active_thread_lock.acquire()  # only one set at a time is active
    try:
        try:
            event = None
            self._proxy_active_lock.acquire()
            try:
                event = self._proxy_active_event
                self._proxy_active_event = None
                if self._proxy_trigger_counter:
                    self._proxy_trigger_counter -= 1
            finally:
                self._proxy_active_lock.release()  # allow any new COVs while we do the set
            if event:  # pending event
                if self.debug:
                    print str(event)
                value = event.results()[1]['value']
                if isinstance(value, Exception):
                    raise value
                try:  # to set() value on destination node
                    self._proxy_active_destination.set(value)  # don't know how long this will take
                    self._proxy_set_exception(None)
                # failure in attempt to set data; maybe the node is not ready yet, try again later
                except (ETimeout, EConnectionError, ENotStarted, ENoSuchNode):
                    # put the event back in the active event slot if no new one
                    # has come in while we were trying to set()
                    self._proxy_active_lock.acquire()
                    try:
                        if self._proxy_active_event is None:
                            self._proxy_active_event = event  # put it back for the next attempt unless a new one came in
                    finally:
                        self._proxy_active_lock.release()  # allow any new COVs while we do the set
                    scheduler.seconds_from_now_do(60, self._proxy_trigger_queue)  # try again in one minute
                    raise  # re-raise the set() exception
                except:
                    raise
                if self.debug:
                    print 'proxy_active_set call set returned'
        except Exception, e:
            try:
                self._proxy_set_exception(e)
                # we have squashed the exception
                # we want to log exceptions that are potential bugs
                # but we don't want to fill the msglog with ETimeouts
                if not isinstance(e, ETimeout):
                    msglog.exception()
            except:
                # if there is a bug in the set_exception method we want
                # to see this, otherwise it makes debugging difficult
                msglog.exception()
    finally:
        self._proxy_active_thread_lock.release()
    if self.debug:
        print 'proxy_active_set done'
def _complete(self):
    try:
        self._ipcheck()
    except:
        msglog.exception()
    if self.isRunning and self.period:
        # Schedule another run in self.period seconds
        scheduler.seconds_from_now_do(self.period, self.go)
def _start_ticking(self):
    if debug > 4:
        print 'start ticking'
    self.semaphore = Semaphore(0)
    self._scheduler = scheduler
    scheduler.seconds_from_now_do(1.0, self._tick)
    # @fixme Switching to a Thread object caused
    # mpx/lib/bacnet/_test_case_bvlc.py to hang after completing.
    # Figure out why, then switch over to the Thread object.
    threading._start_new_thread(self._tock, ())
def start(self):
    if self.enable:
        msglog.log(self.log_name, msglog.types.INFO,
                   "STARTING %s, period = %d" % (self.name, self.period))
        if not self.isRunning:
            self.isRunning = 1
            # Wait for a bit to give time for a possible PPP connection
            # to be brought up.
            scheduler.seconds_from_now_do(90, self.go)
        else:
            raise EAlreadyRunning
def _prune_legacy_ph_schedules(self):
    if not self._ph_scheds_loaded():
        self._ph_scheduled = sys_scheduler.seconds_from_now_do(
            60, self._prune_legacy_ph_schedules)
    else:
        self._prune_schedules(self.__ph_legacy_needs_pruning)
def start(self):
    super(Kwh2Kw, self).start()
    self.running = True
    self._history = KwList(self._window_size)
    self._sid = SM.create_polled({self._nid: self.link})
    # retrieve an initial value to start things off
    value = ts = None
    result = SM.poll_all(self._sid)
    if result is None:
        # still waiting
        try:
            value = as_node(self.link).get()
            ts = time.time()
        except:
            pass
    else:
        try:
            value = result[self._nid]['value']
            ts = result[self._nid]['timestamp']
        except:
            pass
    if isinstance(value, MpxException):
        value = None
    if value and ts:
        self._history.add(value, ts)
    self._scheduled = scheduler.seconds_from_now_do(self.sample_period,
                                                    self.run_update)
    return
def update(self):
    try:
        value = ts = None
        result = SM.poll_all(self._sid)
        if result is not None:
            value = result[self._nid]['value']
            ts = result[self._nid]['timestamp']
        self._history_lock.acquire()
        try:
            if value is None or isinstance(value, MpxException):
                # there were problems collecting during this period;
                # our calculation should not proceed
                self._history.clear()
                if not self._poll_failure:
                    # log the failure, but don't spam the msglog
                    self._poll_failure = True
                    msglog.log('Kwh2Kw', msglog.types.WARN,
                               'Failed to retrieve data from %s' % self.link)
            else:
                self._poll_failure = False
                self._history.add(value, ts)
        finally:
            self._history_lock.release()
    except:
        msglog.exception()
    self._scheduled = scheduler.seconds_from_now_do(self.sample_period,
                                                    self.run_update)
    return
def _schedule(self, dummy=None):
    if self.isStarted:
        # Wake up every period seconds and tickle our children so that
        # they can keep on top of resource usage.
        self.sched_id = scheduler.seconds_from_now_do(self.period,
                                                      self._schedule, None)
        for x in self.children_nodes():
            if hasattr(x, "tickle"):
                x.tickle()
def _schedule(self):
    if self.running:
        self._scheduled = scheduler.seconds_from_now_do(self.poll_freq,
                                                        self._trigger_queue)
    return
def _schedule(self, period=None):
    if period is None:
        period = self.period
    retval = self._check_status()
    if retval:
        period = 2
    if self.isStarted:
        # Wake up every period seconds and check to see if we
        # need to change our status.
        self.sched_id = scheduler.seconds_from_now_do(period, self._schedule, None)
def _distribute_results(self, rqst, rsp):
    address = rqst._address
    cmd = rqst._cmd
    callback_key = (address, cmd)
    min_freq = 100000  # arbitrarily large poll freq.
    self.__callback_lock.acquire()
    try:
        callback_list = self.__callbacks.get(callback_key)
        if callback_list:
            for cb, freq in callback_list:
                try:
                    cb(rsp)
                except:
                    msglog.exception()
                if freq < min_freq:
                    # used to establish when we will send another request
                    min_freq = freq
            #@fixme - add delay
            scheduler.seconds_from_now_do(min_freq, self.send_request,
                                          address, cmd, self._distribute_results)
    finally:
        self.__callback_lock.release()
def _load_ctlsvc_schedules(self):
    if self._ctl_svc_running():
        sched_holder = None
        try:
            sched_holder = as_node(self.sched_holder).get_child('TIM')
        except:
            self._ph_loaded = True
        if sched_holder:
            sched_holder.event_subscribe(self, ScheduleCreatedEvent)
            self._load_schedule_group(sched_holder,
                                      CtlSvcDelegatedHierarchialScheduler)
            self._ph_loaded = True
        return
    self._ph_loader_scheduled = sys_scheduler.seconds_from_now_do(
        60, self._load_ctlsvc_schedules)
def _load_remote_hosts(self, remote_hosts):
    failed = []
    for host in remote_hosts:
        try:
            sched_manager = host.as_remote_node('/services/Schedule Manager')
            self._load_remote_schedules(sched_manager, host)
        except:
            message = 'Unable to load remote schedules from host: %s' % host.name
            self.message(message)
            msglog.exception()
            failed.append(host)
    self.remotes_loaded = True
    #@fixme - convert to an event based approach, triggered by changes
    #downstream.
    if remote_hosts:  #failed:
        #self._hm_scheduled = sys_scheduler.seconds_from_now_do(
        #    60, self.load_remote_hosts, failed
        #)
        self._hm_scheduled = sys_scheduler.seconds_from_now_do(
            300, self.load_remote_hosts, remote_hosts)
def do_start(self):
    self.message('Schedule Manager starting.')
    schedule_ph_prune = False
    scheds = PERSISTANCE_MANAGER.get_scheds()
    proxy_prune_list = []
    for sched in scheds:
        node_info = {}
        try:
            node_info = PERSISTANCE_MANAGER.get_sched_cfg(sched)
            if node_info.get('factory').count('ProxiedHierarchialScheduler'):
                host_url = node_info.get('cfg').get('host_url')
                try:
                    as_node(host_url)
                except ENoSuchName:
                    proxy_prune_list.append(sched)
                    continue
            sched_node = create_node(node_info)
            uuid = node_info.get('cfg').get('uuid')
            if not uuid or uuid == 'None':
                # uuid was added later; the code below deals with persisting
                # that property.
                PERSISTANCE_MANAGER.put_sched(sched_node.as_node_url(),
                                              serialize_node(sched_node))
            #except:
            #    msglog.exception()
            #    continue
            if not isinstance(sched_node, ProxiedHierarchialScheduler):
                # proxied schedules store locally.  Restore summary, properties
                # and meta for local.
                url = sched_node.as_node_url()
                properties = PERSISTANCE_MANAGER.get_sched_props(url)
                if properties:
                    sched_node.set_properties(properties, save=False)
                meta = PERSISTANCE_MANAGER.get_sched_meta(url)
                if meta:
                    sched_node.set_event_meta(meta)
                if not isinstance(sched_node,
                                  (DelegatedHierarchialScheduler,
                                   CtlSvcDelegatedHierarchialScheduler)):
                    sched_node._set_summary(
                        PERSISTANCE_MANAGER.get_sched_summary(url))
                    sched_node.set_override(
                        PERSISTANCE_MANAGER.get_override(url))
            if isinstance(sched_node, DelegatedHierarchialScheduler):
                # keep track of the "legacy" schedules we are delegating to
                delegate = sched_node.configuration().get('delegate')
                try:
                    # see if the target still exists.
                    as_node(delegate)
                    self.__legacy_schedules.append(delegate)
                except:
                    # the legacy schedule disappeared on us.
                    # schedule it for removal, iff it doesn't have children
                    if isinstance(sched_node, CtlSvcDelegatedHierarchialScheduler):
                        schedule_ph_prune = True
                        self.__ph_legacy_needs_pruning.append(sched_node)
                    else:
                        self.__legacy_needs_pruning.append(sched_node)
            elif isinstance(sched_node, ProxiedHierarchialScheduler):
                host_url = sched_node.host_url
                uuid = sched_node.configuration().get('uuid')
                self._proxied_manager.register_persisted(host_url, uuid, sched_node)
            try:
                sched_node.start()
            except:
                msglog.exception()
        except:
            msglog.exception()
            #continue
    # LOOP ENDS
    proxy_prune_list.sort(sched_sort)
    for sched in proxy_prune_list:
        msg = 'Removing schedule %s for non-existent host.' % urllib.unquote(sched)
        self.message(msg, level=0)
        PERSISTANCE_MANAGER.remove_sched(sched)
    self._load_schedules()
    self._prune_schedules(self.__legacy_needs_pruning)
    try:
        remote_hosts = self.host_manager.children_nodes()
    except:
        remote_hosts = []
    self.load_remote_hosts(remote_hosts)
    if schedule_ph_prune:
        # there are Control Service schedules to care about.
        self._ph_scheduled = sys_scheduler.seconds_from_now_do(
            60, self._prune_legacy_ph_schedules)
    self.__running = True
def _schedule(self):
    scheduler.seconds_from_now_do(self._period, self.go)
def _tick(self):
    self.semaphore.release()
    scheduler.seconds_from_now_do(1.0, self._tick)
    if debug > 4:
        print 'FDT tick'
def _tick(self):
    # also called to start ticking again after a stop
    self.semaphore.release()
    scheduler.enabled = 1
    scheduler.seconds_from_now_do(self.refresh_rate, self._tick)
    if debug > 4:
        print 'RAFD tick'
def _schedule(self, dummy=None):
    self._msglog('setting led, state = %s' % self.stateName)
    self.stateSetFunction()
    if self.isStarted:
        self.sched_id = scheduler.seconds_from_now_do(50, self._schedule, None)
def schedule(self):
    self._scheduled = scheduler.seconds_from_now_do(self.timeout,
                                                    self.run_check_timeouts)
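# All of the methods above share one idiom: do the work, then re-arm the timer
# with scheduler.seconds_from_now_do so the next run is scheduled only after
# the current one finishes.  A minimal sketch of that pattern follows; it
# assumes only the seconds_from_now_do(delay, callback, *args) signature used
# in this file -- the PeriodicTask class, its _do_work method, and the import
# paths are illustrative assumptions, not part of the framework.
from mpx.lib import msglog               # assumed msglog location
from mpx.lib.scheduler import scheduler  # assumed location of the shared scheduler

class PeriodicTask(object):
    def __init__(self, period=60.0):
        self.period = period
        self.running = False
        self.sched_id = None

    def start(self):
        self.running = True
        # schedule the first run; each run re-arms itself until stop() is called
        self.sched_id = scheduler.seconds_from_now_do(self.period, self._run)

    def stop(self):
        # stop re-arming; a stale callback checks self.running and simply exits
        self.running = False

    def _run(self):
        if not self.running:
            return
        try:
            self._do_work()     # hypothetical payload
        except:
            msglog.exception()  # never let one bad run kill the cycle
        # re-arm only after the work completes, so slow runs do not overlap
        self.sched_id = scheduler.seconds_from_now_do(self.period, self._run)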