def _proxy_get_link(self, skip_cache=0):
    # Return the current value of the node this proxy links to, either via
    # a polled subscription or a direct get().  On failure, either return
    # the configured error response or record the exception on the proxy
    # and re-raise it.
    try:
        if self._proxy_start_exception:
            #if there was an exception during start, repeat it now
            self._start_proxy_get() #try again until it starts ok
        answer = None
        if self.use_subscription:
            if (self._proxy_sid is None):
                # Lazily create the polled subscription on the linked node;
                # timeout=None means the subscription does not expire.
                self._proxy_sid = SM.create_polled(
                    {1:self._proxy_linked_node()}, timeout=None)
            answer = SM.poll_all(self._proxy_sid)[1]
            if isinstance(answer, dict):
                answer = answer['value']
            # The subscription manager can deliver an exception as the
            # value; surface it through the except clause below.
            if isinstance(answer, Exception):
                raise answer
        else:
            answer = self._proxy_linked_node().get() # @fixme skip_cache) how to use gets that dont have skip cache
        self._proxy_set_exception(None)
        return answer
    except Exception, e:
        #transfer any exception over to the proxy
        if hasattr(self, 'error_response'):
            if self.error_response != '%ERROR%': #default error response is to pass exception up
                if self.error_response == '%NONE%':
                    return None
                return self.error_response #return specific desired value when an error occurs
        self._proxy_set_exception(e)
        raise # reraise the exception
def start(self):
    """Start kWh-to-kW conversion: subscribe to the source node, seed the
    sample history with an initial reading, and schedule periodic updates.

    Bug fix: the seeding guard used ``if value and ts:``, which silently
    discards a legitimate initial reading of 0 (or a timestamp of 0);
    explicit ``is not None`` checks are used instead.
    """
    super(Kwh2Kw, self).start()
    self.running = True
    self._history = KwList(self._window_size)
    self._sid = SM.create_polled({self._nid: self.link})
    # retrieve an initial value to start things off
    value = ts = None
    result = SM.poll_all(self._sid)
    if result is None:
        # Subscription still warming up - fall back to a direct get.
        # Best effort only: scheduled updates will fill the history later.
        try:
            value = as_node(self.link).get()
            ts = time.time()
        except:
            pass
    else:
        try:
            value = result[self._nid]['value']
            ts = result[self._nid]['timestamp']
        except:
            pass
    if isinstance(value, MpxException):
        # Poll delivered an exception object instead of data; ignore it.
        value = None
    if value is not None and ts is not None:
        self._history.add(value, ts)
    self._scheduled = scheduler.seconds_from_now_do(self.sample_period,
                                                    self.run_update)
    return
def _proxy_get_link(self, skip_cache=0):
    # Fetch the linked node's current value.  Uses a (lazily created)
    # polled subscription when use_subscription is set, otherwise a direct
    # get().  Failures are mapped to error_response when configured, else
    # recorded on the proxy and re-raised.
    try:
        if self._proxy_start_exception:
            #if there was an exception during start, repeat it now
            self._start_proxy_get() #try again until it starts ok
        answer = None
        if self.use_subscription:
            if (self._proxy_sid is None):
                # timeout=None: subscription never expires.
                self._proxy_sid = SM.create_polled(
                    {1: self._proxy_linked_node()}, timeout=None)
            answer = SM.poll_all(self._proxy_sid)[1]
            if isinstance(answer, dict):
                answer = answer['value']
            # Exceptions can arrive as poll values; re-raise them so the
            # handler below applies the configured error policy.
            if isinstance(answer, Exception):
                raise answer
        else:
            answer = self._proxy_linked_node().get() # @fixme skip_cache) how to use gets that dont have skip cache
        self._proxy_set_exception(None)
        return answer
    except Exception, e:
        #transfer any exception over to the proxy
        if hasattr(self, 'error_response'):
            if self.error_response != '%ERROR%': #default error response is to pass exception up
                if self.error_response == '%NONE%':
                    return None
                return self.error_response #return specific desired value when an error occurs
        self._proxy_set_exception(e)
        raise # reraise the exception
def start(self):
    """Start the converter: open the polled subscription, prime the sample
    history with one initial reading, and schedule the update cycle.

    Bug fix: ``if value and ts:`` treated a valid reading of 0 (or a zero
    timestamp) as "no data"; replaced with explicit ``is not None`` tests.
    """
    super(Kwh2Kw, self).start()
    self.running = True
    self._history = KwList(self._window_size)
    self._sid = SM.create_polled({self._nid:self.link})
    # retrieve an initial value to start things off
    value = ts = None
    result = SM.poll_all(self._sid)
    if result is None:
        # Subscription not ready yet; try a direct read (best effort -
        # the scheduled updates will populate the history regardless).
        try:
            value = as_node(self.link).get()
            ts = time.time()
        except:
            pass
    else:
        try:
            value = result[self._nid]['value']
            ts = result[self._nid]['timestamp']
        except:
            pass
    if isinstance(value, MpxException):
        # The poll returned an exception object rather than data.
        value = None
    if value is not None and ts is not None:
        self._history.add(value, ts)
    self._scheduled = scheduler.seconds_from_now_do(self.sample_period,
                                                    self.run_update)
    return
def stop(self):
    """Stop this column: mark it stopped, release calculator state and
    destroy any active subscription."""
    self.__started = 0
    Column.stop(self)
    self.variables = None
    self._calculator = None
    sid = self._sid
    if sid:
        SM.destroy(sid)
        self._sid = None
def start(self):
    # Start the column.  If the configured function is the literal form
    # "as_node('<path>')...get", resolve the node now and either subscribe
    # through the subscription manager or bind the node method directly.
    # A child node with an evaluate() method (at most one) replaces the
    # function with calculator-based evaluation.
    Column.start(self)
    if (type(self.__function_config) == types.StringType and
        string.count(self.__function_config,'as_node') == 1 and
        self.__function_config.endswith('get')):
        func = self.__function_config
        # Slice out the node path between the quotes of as_node('...').
        self.__node = as_node(func[func.find('(')+2:func.rfind(')')-1])
        if self.use_subscription_manager:
            self._sid = SM.create_delivered(self, {1:as_node_url(self.__node)})
            self.function = self.get_last
        else:
            # Bind the trailing method name (e.g. "get") on the node.
            self.function = getattr(self.__node,func[func.rfind('.')+1:])
    rexec = self.parent.parent.get_environment()
    # Wrap the callable so it executes inside the restricted environment.
    self.original_function = RFunction(self.function, args=self.args,
                                       context=self.context, rexec=rexec)
    self.function = self._convert
    self.variables = {}
    nodes = self.children_nodes()
    for potential_calculator in nodes:
        if hasattr(potential_calculator, 'evaluate'):
            if self._calculator: #oops
                raise EAttributeError('Too many calculator nodes', self)
            self._calculator = potential_calculator
            self.function = self._evaluate # hook the calculator in
            self.__original_function = self.original_function
            self.original_function = self.__evaluate_original_function
    self.__started = 1
def _setup_trigger(self):
    """Subscribe to the trigger node, retrying every 60s until it exists.

    Bug fix: ``except ENotStarted, ENoSuchNode:`` is the Python 2 binding
    form - it catches ONLY ENotStarted and binds the instance to the name
    ENoSuchNode.  A parenthesized tuple is required to catch both types.
    """
    try:
        self._sid = SM.create_delivered(self, {1: as_node(self.trigger)})
    except (ENotStarted, ENoSuchNode):
        msg = 'TriggeredExporter trigger: %s does not exist - could be nascent' % self._trigger
        msglog.log('broadway', msglog.types.WARN, msg)
        # The trigger may simply not be started yet; try again later.
        scheduler.seconds_from_now_do(60, self._setup_trigger)
def update(self):
    # Periodic sample: poll the subscribed kWh reading and append it to the
    # rolling history under the history lock.  A failed poll clears the
    # history so no rate is computed across a data gap.  Always reschedules
    # itself for the next sample period.
    try:
        value = ts = None
        result = SM.poll_all(self._sid)
        if result is not None:
            value = result[self._nid]['value']
            ts = result[self._nid]['timestamp']
        self._history_lock.acquire()
        try:
            if value is None or isinstance(value, MpxException):
                # there were problems collecting during this period,
                # our calculation should not proceed
                self._history.clear()
                if not self._poll_failure:
                    # log the failure, but don't spam the msglog
                    self._poll_failure = True
                    msglog.log('Kwh2Kw', msglog.types.WARN,
                               'Failed to retrieve data from %s' % self.link)
            else:
                self._poll_failure = False
                self._history.add(value, ts)
        finally:
            self._history_lock.release()
    except:
        msglog.exception()
    self._scheduled = scheduler.seconds_from_now_do(self.sample_period,
                                                    self.run_update)
    return
def _proxy_start_active_mode(self):
    # Begin actively mirroring values between this proxy and its linked
    # node.  GET_ONLY: linked node is the source, this node the
    # destination; SET_ONLY: roles reversed.  If anything is not ready,
    # schedule a retry using a slowly widening random backoff window.
    if self.link:
        try:
            if self._proxy_sid is None: #have not started subscription service yet
                if self.proxy_direction == GET_ONLY:
                    self._proxy_active_source = self._proxy_linked_node()
                    if self._proxy_active_source is None:
                        raise ENotStarted()
                    self._proxy_active_destination = self
                else: #SET_ONLY
                    self._proxy_active_source = self
                    self._proxy_active_destination = self._proxy_linked_node()
                    if self._proxy_active_destination is None:
                        raise ENotStarted()
                self._proxy_active_queue = Queue()
                self._proxy_sid = SM.create_delivered(self,
                                                      {1: self._proxy_active_source})
                if self.debug:
                    print 'Active proxy %s started successfully' % (self.name)
        except: #it didn't work. Setup schedule to try again in x seconds.
            # Widen the retry window (capped at 90s) so repeated failures
            # back off instead of retrying at a fixed fast rate.
            if self._retry_win_high < 90:
                self._retry_win_high += 1
            retry_in = randint(int(self._retry_win_high * .66),
                               self._retry_win_high)
            scheduler.seconds_from_now_do(retry_in,
                                          self._proxy_start_active_mode)
            #raise #took this out since it mostly just served to force the scheduler tread to restart
            if self.debug:
                msglog.exception()
def start(self):
    # Start the column.  When the function config is literally of the
    # shape "as_node('<path>')...get", the node is resolved here and read
    # either via the subscription manager (get_last) or by binding the
    # node's method directly.  At most one child calculator node may hook
    # in via its evaluate() method.
    Column.start(self)
    if (type(self.__function_config) == types.StringType and
        string.count(self.__function_config, 'as_node') == 1 and
        self.__function_config.endswith('get')):
        func = self.__function_config
        # Extract the quoted node path inside as_node('...').
        self.__node = as_node(func[func.find('(') + 2:func.rfind(')') - 1])
        if self.use_subscription_manager:
            self._sid = SM.create_delivered(self,
                                            {1: as_node_url(self.__node)})
            self.function = self.get_last
        else:
            # Bind the trailing attribute name (e.g. "get") on the node.
            self.function = getattr(self.__node, func[func.rfind('.') + 1:])
    rexec = self.parent.parent.get_environment()
    # Wrap the callable to run inside the restricted execution environment.
    self.original_function = RFunction(self.function, args=self.args,
                                       context=self.context, rexec=rexec)
    self.function = self._convert
    self.variables = {}
    nodes = self.children_nodes()
    for potential_calculator in nodes:
        if hasattr(potential_calculator, 'evaluate'):
            if self._calculator: #oops
                raise EAttributeError('Too many calculator nodes', self)
            self._calculator = potential_calculator
            self.function = self._evaluate # hook the calculator in
            self.__original_function = self.original_function
            self.original_function = self.__evaluate_original_function
    self.__started = 1
def _proxy_start_active_mode(self):
    # Start active (event-driven) proxying for this node.  The direction
    # setting decides which end is source and which is destination; a
    # delivered subscription on the source drives the mirroring.  On any
    # failure, a retry is scheduled with a randomized, widening backoff.
    if self.link:
        try:
            if self._proxy_sid is None: #have not started subscription service yet
                if self.proxy_direction == GET_ONLY:
                    self._proxy_active_source = self._proxy_linked_node()
                    if self._proxy_active_source is None:
                        raise ENotStarted()
                    self._proxy_active_destination = self
                else: #SET_ONLY
                    self._proxy_active_source = self
                    self._proxy_active_destination = self._proxy_linked_node()
                    if self._proxy_active_destination is None:
                        raise ENotStarted()
                self._proxy_active_queue = Queue()
                self._proxy_sid = SM.create_delivered(self,
                                                      {1:self._proxy_active_source})
                if self.debug:
                    print 'Active proxy %s started successfully' % (self.name)
        except: #it didn't work. Setup schedule to try again in x seconds.
            # Backoff window grows by one second per failure up to 90s.
            if self._retry_win_high < 90:
                self._retry_win_high += 1
            retry_in = randint(int(self._retry_win_high * .66),
                               self._retry_win_high)
            scheduler.seconds_from_now_do(retry_in,
                                          self._proxy_start_active_mode)
            #raise #took this out since it mostly just served to force the scheduler tread to restart
            if self.debug:
                msglog.exception()
def _setup_trigger(self):
    """Create the delivered subscription on the trigger node; if the node
    cannot be resolved yet, retry in 60 seconds.

    Bug fix: the original ``except ENotStarted, ENoSuchNode:`` caught only
    ENotStarted (binding the instance to the name ENoSuchNode); the tuple
    form catches both exception types as intended.
    """
    try:
        self._sid = SM.create_delivered(self, {1:as_node(self.trigger)})
    except (ENotStarted, ENoSuchNode):
        msg = 'TriggeredExporter trigger: %s does not exist - could be nascent' % self._trigger
        msglog.log('broadway',msglog.types.WARN,msg)
        # The trigger node may be nascent; schedule another attempt.
        scheduler.seconds_from_now_do(60, self._setup_trigger)
def update(self):
    # One sampling cycle: poll the subscription for the latest kWh value
    # and timestamp, then update the rolling history while holding the
    # history lock.  Bad polls wipe the history (so no rate is derived
    # across a gap) and are logged once per failure streak.  The method
    # unconditionally reschedules itself.
    try:
        value = ts = None
        result = SM.poll_all(self._sid)
        if result is not None:
            value = result[self._nid]['value']
            ts = result[self._nid]['timestamp']
        self._history_lock.acquire()
        try:
            if value is None or isinstance(value, MpxException):
                # there were problems collecting during this period,
                # our calculation should not proceed
                self._history.clear()
                if not self._poll_failure:
                    # log the failure, but don't spam the msglog
                    self._poll_failure = True
                    msglog.log(
                        'Kwh2Kw', msglog.types.WARN,
                        'Failed to retrieve data from %s' % self.link)
            else:
                self._poll_failure = False
                self._history.add(value, ts)
        finally:
            self._history_lock.release()
    except:
        msglog.exception()
    self._scheduled = scheduler.seconds_from_now_do(
        self.sample_period, self.run_update)
    return
def create_polled(self, node_reference_table=None, timeout=300):
    """Build a reference table from this device's points and open a polled
    subscription for them, returning the subscription id."""
    if not self._properties_loaded:
        self._load_properties()
    if node_reference_table is None:
        node_reference_table = {}
    for point in self._get_points():
        key = str((point.ptype, point.name))
        node_reference_table[key] = point.reference
    return SM.create_polled(node_reference_table, timeout)
def create_polled(self, node_reference_table=None, timeout=300):
    """Open a polled subscription covering the host node of every mount
    point, for presence monitoring."""
    if node_reference_table is None:
        node_reference_table = {}
    for mount_point, nodepath in self.get_mount_points():
        try:
            node_reference_table[nodepath] = mount_point.host
        except:
            # Best effort: a broken mount point must not block the rest.
            warning = ('Failed to establish presence monitoring for %s'
                       % nodepath)
            msglog.log('Entity Manager', msglog.types.WARN, warning)
    return SM.create_polled(node_reference_table, timeout)
def create_polled(self, node_reference_table=None, timeout=300):
    """Register every point of this device with the subscription manager
    and return the resulting polled-subscription id."""
    if not self._properties_loaded:
        self._load_properties()
    if node_reference_table is None:
        node_reference_table = {}
    for pt in self._get_points():
        node_reference_table[str((pt.ptype, pt.name))] = pt.reference
    return SM.create_polled(node_reference_table, timeout)
def stop(self):
    # Shut the converter down: destroy the subscription (best effort),
    # clear the sample history and cancel any pending scheduled update.
    self.running = False
    self._history = None
    try:
        SM.destroy(self._sid)
    except:
        pass
    self._sid = None
    self._history_lock.acquire()
    try:
        # NOTE(review): _history was already cleared above without the
        # lock; this assignment is the one serialized against update().
        self._history = None
        s = self._scheduled
        self._scheduled = None
        if s is not None:
            # The entry may already have fired or been cancelled.
            try:
                s.cancel()
            except:
                pass
    finally:
        self._history_lock.release()
    return
def create_polled(self, node_reference_table=None, timeout=300):
    """Subscribe to the property behind every entity mapping, keying each
    entry by the stringified [setpoint_id, entity_path] pair."""
    if node_reference_table is None:
        node_reference_table = {}
    for setpoint_id in self._entity_map.keys():
        for mapping in self._entity_map.get(setpoint_id, []):
            # Skip mappings whose property cannot be resolved right now.
            try:
                prop = mapping.get_property_reference()
            except:
                continue
            key = str([setpoint_id, mapping.entity_path])
            node_reference_table[key] = prop
    return SM.create_polled(node_reference_table, timeout)
def stop(self):
    """Tear down this proxy's subscription; a no-op when none is active."""
    if self._proxy_sid is None:
        return
    SM.destroy(self._proxy_sid)
    self._proxy_sid = None
def destroy(self, sid):
    """Delegate teardown of subscription *sid* to the subscription
    manager and return its result."""
    outcome = SM.destroy(sid)
    return outcome
def poll_changed(self, poll_id):
    """Return the subscription manager's changed-values result for
    *poll_id*."""
    changed = SM.poll_changed(poll_id)
    return changed
def poll_all(self, poll_id):
    """Return all current values for subscription *poll_id* via the
    subscription manager."""
    values = SM.poll_all(poll_id)
    return values
def poll_all(self, sid):
    """Delegate a full poll of subscription *sid* to the subscription
    manager."""
    result = SM.poll_all(sid)
    return result
def poll_changed(self, sid):
    """Delegate a changed-only poll of subscription *sid* to the
    subscription manager."""
    delta = SM.poll_changed(sid)
    return delta
# Manual test transcript for exercising BACnet point reads and the
# subscription manager (appears to be pasted from an interactive console
# session; `t` below is assumed to be a pre-existing hardware-settings
# object -- TODO confirm).
t.change_hardware_settings('IP','eth0','1','47428')
from mpx.lib.node import as_node
ai1 = as_node('interfaces/eth0/BACnetIP/2/AnalogInput_01')
aipv=as_node('interfaces/eth0/BACnetIP/2/AnalogInput_01/present_value')
ain=as_node('interfaces/eth0/BACnetIP/2/AnalogInput_01/object_name')
from mpx.lib.bacnet._bacnet import read_property_multiple_g3 as rpm
# Read both properties from the device in a single RPM request.
rpm(aipv.bacnet_property.device,
    [aipv.bacnet_property.property_tuple, ain.bacnet_property.property_tuple])
# Same points, this time read through a polled subscription.
from mpx.service.subscription_manager._manager import SUBSCRIPTION_MANAGER as SM
from mpx.lib.node import as_node
ai1 = as_node('interfaces/eth0/BACnetIP/2/AnalogInput_01')
aipv=as_node('interfaces/eth0/BACnetIP/2/AnalogInput_01/present_value')
ain=as_node('interfaces/eth0/BACnetIP/2/AnalogInput_01/object_name')
sid = SM.create_polled({1:aipv, 2:ain})
# Second variant: subscribe to properties addressed via the internetwork
# tree under services/network instead of the interface tree.
from mpx.service.subscription_manager._manager import SUBSCRIPTION_MANAGER as SM
from mpx.lib.node import as_node
aipv=as_node('services/network/BACnet/internetwork1/Devices/1/8/1/77')
ain =as_node('services/network/BACnet/internetwork1/Devices/1/8/1/79')
pcm1=as_node('services/network/BACnet/internetwork1/Devices/1/151/1/85')
pcm2=as_node('services/network/BACnet/internetwork1/Devices/1/151/1/79')
up = as_node('services/network/BACnet/internetwork1/Devices/1/151/1/10209')
mdd= as_node('services/network/BACnet/internetwork1/Devices/1/151/1/10210')
sid = SM.create_polled({1:aipv, 2:ain, 3:pcm1, 4:pcm2, 5:up, 6:mdd})
# Note: this rebinds sid, orphaning the six-point subscription above.
sid = SM.create_polled({5:up,6:mdd})
SM.poll_all(sid)
SM.destroy(sid)
def destroy(self, poll_id):
    """Ask the subscription manager to destroy subscription *poll_id* and
    return its result."""
    result = SM.destroy(poll_id)
    return result