def exception(self, message=None):
    """Log a handled exception, bump the server's exception counter, and
    attempt to report a 500 error back to the client."""
    msglog.exception(prefix="handled")
    self.server.exceptions.increment()
    try:
        self.error(500, message)
    except:
        # error() itself can fail (e.g. broken connection); just log it.
        msglog.exception(prefix="handled")
def create_node(self, factory, nodeurl, **config):
    """Create, configure and start a new node at *nodeurl*.

    factory may be a callable, or a dotted-path string naming one
    (e.g. "pkg.module.ClassName").  Raises TypeError if a node already
    exists at nodeurl.  Returns the new node's configuration dict.
    """
    try:
        as_node(nodeurl)
    except KeyError:
        pass
    else:
        raise TypeError("Node exists: %s" % nodeurl)
    if isinstance(factory, str):
        # Resolve the dotted factory path without exec()/eval(): import
        # the module portion and fetch the attribute explicitly.
        import importlib
        module, sep, name = factory.rpartition(".")
        if name:
            factory = getattr(importlib.import_module(module), name)
    parent, sep, name = nodeurl.rpartition("/")
    configuration = {"name": name, "parent": parent}
    configuration.update(config)
    node = factory()
    try:
        node.configure(configuration)
    except:
        msglog.log("broadway", msglog.types.WARN,
                   "Error prevented configuration of new node: %s" % node)
        msglog.exception(prefix="handled")
        # Best effort to remove the half-configured node.
        try:
            node.prune()
        except:
            msglog.exception(prefix="handled")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "Node successfully pruned.")
    else:
        msglog.log("broadway", msglog.types.INFO,
                   "New node created: %s" % node)
        self.updatepdo(nodeurl, node)
        node.start()
    return node.configuration()
def clear_subscr(self, targets=None):
    """Clear subscriptions on the line handler.

    targets may be None, a list of (lan_address, id_number) 2-tuples,
    or a list of node-URL strings (resolved here to 2-tuples).  URLs
    that fail to resolve are logged and skipped.
    """
    if self.line_handler is None:
        return
    if isinstance(targets, list):
        first = targets[0]
        if isinstance(first, tuple):
            pass  # already in (dev_id, obj_id) form
        elif isinstance(first, str):
            resolved = []
            for node_URL in targets:
                try:
                    node = as_node(node_URL)
                except ENoSuchName:
                    msglog.exception()
                    continue
                resolved.append((node.lan_address, node.id_number,))
            if len(resolved) == 0:
                return  # nothing resolved; nothing to clear
            targets = resolved
        else:
            raise EInvalidValue('targets', targets, 'Should be None, int, '
                                'or list of 2-tuples (dev_id, obj_id) or '
                                'a list of node URL strings.')
    self.line_handler.clear_subscr(targets)
    return
def _alarm_code(self, alarm_dict): result = "N/A" try: result = alarm_dict['state'] except: msglog.exception() return result
def set(self, newval):
    """Drive this object with a command value ('0'/'1' or DACT/ACTI)."""
    if newval.isdigit():
        newval = int(newval)
    if newval not in command:
        msglog.log('USAP', WARN,
                   "Object:set() with invalid command - %s" % str(newval))
        raise EInvalidValue('Invalid Command', str(newval),
                            'Valid commands are \'0:DACT\' or \'1:ACTI\'.')
    if self.value == command[newval]:
        # Already at the requested state; nothing to send.
        msglog.log('USAP', INFO,
                   "Object:set() - Current value is same as new value \'%s\', so no action required." % str(command[newval]))
        return
    # Build the raw request payload: header + path + '.' + command text.
    vData = string.join(['\x00\x00\x40', self.path, '.',
                         str(command[newval])], '')
    msglog.log('USAP', INFO,
               "Object:set() - newval = %s, vData(%d) = %s"
               % (str(command[newval]), len(vData), str(vData)))
    self.req_vData_obj.setValue(vData)
    if self.unison_v1_9_0_or_prior == 1:
        try:
            self.lh.send_request_without_response(self.request_obj, 30)
            time.sleep(1)
        except Exception:
            msglog.log('USAP', WARN,
                       "Object:set() - Got exception trying to send request")
            msglog.exception()
            raise ETimeout('Error sending USAP request')
def cancel(self):
    """Close this handler's channel, logging (never raising) failures."""
    msglog.log('broadway', WARN, '%r closing channel' % self)
    try:
        self.channel.close()
    except:
        msglog.exception(prefix='Handled')
def invoke(self, request):
    """Dispatch an XML-RPC request payload to local services.

    NOTE(review): `results` is accumulated but never returned from this
    method, and the method name decoded from the envelope is overwritten
    inside the loop -- confirm against the caller whether intentional.
    """
    data = self._get_data(request)
    results = []
    # Decode the XML-RPC payload into (params, methodname).
    r = xmlrpclib.loads(data)
    params = r[0]
    method = r[1]
    for param in params:
        # Each param is "service:method".  "mpx://" itself contains a
        # colon, so shield it with a placeholder around the split.
        param = param.replace("mpx://", '$$PLACE_HOLDER$$')
        p_s = param.split(':')
        service = p_s[0].replace('$$PLACE_HOLDER$$', 'mpx://')
        method = p_s[1]
        try:
            results.append(self._invoke(service, method))
        except Exception as e:
            if self.log_n_exceptions:
                # Bounded exception logging; negative means unlimited.
                msglog.exception()
                if self.log_n_exceptions > 0:
                    self.log_n_exceptions -= 1
            results.append(self._exception_string(e))
def proxy_active_set(self, dummy):
    """Consume the pending COV event (if any) and push its value to the
    destination node; transient failures are requeued for retry."""
    # Only one set at a time may be active.
    self._proxy_active_thread_lock.acquire()
    try:
        try:
            event = None
            self._proxy_active_lock.acquire()
            try:
                event = self._proxy_active_event
                self._proxy_active_event = None
                if self._proxy_trigger_counter:
                    self._proxy_trigger_counter -= 1
            finally:
                # Allow any new COVs while we do the set.
                self._proxy_active_lock.release()
            if event:  # pending event
                if self.debug:
                    print(str(event))
                value = event.results()[1]['value']
                if isinstance(value, Exception):
                    raise value
                try:
                    # Set value on the destination node; duration unknown.
                    self._proxy_active_destination.set(value)
                    self._proxy_set_exception(None)
                except (ETimeout, EConnectionError, ENotStarted,
                        ENoSuchNode):
                    # Destination may not be ready yet; put the event back
                    # (unless a newer one arrived) and retry in a minute.
                    self._proxy_active_lock.acquire()
                    try:
                        if self._proxy_active_event is None:
                            self._proxy_active_event = event
                    finally:
                        self._proxy_active_lock.release()
                    scheduler.seconds_from_now_do(
                        60, self._proxy_trigger_queue)
                    raise  # re-raise the set() exception
                except:
                    raise
                if self.debug:
                    print('proxy_active_set call set returned')
        except Exception as e:
            try:
                self._proxy_set_exception(e)
                # Log potential bugs, but don't fill the msglog with
                # routine ETimeouts.
                if not isinstance(e, ETimeout):
                    msglog.exception()
            except:
                # A bug in _proxy_set_exception itself must be visible.
                msglog.exception()
    finally:
        self._proxy_active_thread_lock.release()
    if self.debug:
        print('proxy_active_set done')
def configure(self, config):
    """Configure by delegating to the version-specific ADAM module, then
    morph this instance to mimic the real one."""
    Module.configure(self, config)
    if not self.version:
        self.version = m.ReadModuleName()
    # Dynamically import the version-specific ADAM module (or package).
    # NOTE(review): compile/eval on a constructed module path -- assumes
    # self.version is trusted device output; confirm.
    module = 'mpx.ion.adam.adam' + self.version
    command = compile('import ' + module,
                      'mpx.ion.adam.unknown.configure()', 'exec')
    eval(command)
    # Locate the module's factory and instantiate the "real" class.
    adam_factory = eval(module + '.factory', globals(), locals())
    self.instance = adam_factory()
    # Scary stuff: detach this ion from our parent, configure the real
    # instance (thus attaching it to the parent), then copy its
    # attributes onto this instance so any existing handles to this
    # object behave like the real one.
    try:
        self.parent._del_child(self.name)
        self.instance.configure(config)
        attributes = vars(self.instance.__class__)
        attributes.update(vars(self.instance))
        for attribute in attributes:
            setattr(self, attribute, getattr(self.instance, attribute))
    except:
        msglog.exception()
        # The scary stuff failed; reattach to the parent.
        try:
            self.parent._del_child(self.instance.name)
        except:
            pass
        self.parent._add_child(self)
def _tock(self):
    """Timer-thread body: tick every FDT entry and drop the ones that
    have timed out; on any error, log and restart after a pause."""
    while 1:
        try:
            self.semaphore.acquire()
            _module_lock.acquire()
            try:
                if debug > 4:
                    print('FDT tock')
                if self.entries:
                    # keys() snapshot allows deletion while scanning.
                    for key in self.entries.keys():
                        entry = self.entries[key]
                        if entry.tick():  # time to remove
                            try:
                                del self.entries[key]
                                if debug > 4:
                                    print("timeout on foreign device")
                            except:
                                pass
            finally:
                _module_lock.release()
        except:
            if msglog:
                msglog.exception()
                msglog.log('broadway', msglog.types.INFO,
                           'FDT timer thread restarting\n')
            pause(10.0)
def _proxy_start_active_mode(self):
    """Start active-proxy operation by creating the delivered
    subscription; on failure, reschedule with a widening backoff."""
    if self.link:
        try:
            if self._proxy_sid is None:  # subscription not started yet
                if self.proxy_direction == GET_ONLY:
                    self._proxy_active_source = self._proxy_linked_node()
                    if self._proxy_active_source is None:
                        raise ENotStarted()
                    self._proxy_active_destination = self
                else:  # SET_ONLY
                    self._proxy_active_source = self
                    self._proxy_active_destination = \
                        self._proxy_linked_node()
                    if self._proxy_active_destination is None:
                        raise ENotStarted()
                self._proxy_active_queue = Queue()
                self._proxy_sid = SM.create_delivered(
                    self, {1: self._proxy_active_source})
                if self.debug:
                    print('Active proxy %s started successfully'
                          % self.name)
        except:
            # Start failed.  Widen the retry window (capped at 90s) and
            # schedule another attempt at a randomized delay; deliberately
            # not re-raising (would only restart the scheduler thread).
            if self._retry_win_high < 90:
                self._retry_win_high += 1
            retry_in = randint(int(self._retry_win_high * .66),
                               self._retry_win_high)
            scheduler.seconds_from_now_do(retry_in,
                                          self._proxy_start_active_mode)
            if self.debug:
                msglog.exception()
def write_new_table_contents(self, WBDT_message):
    """Apply a Write-Broadcast-Distribution-Table request.

    Returns Result(0x0000) on success, Result(0x0010) (nak) on failure
    or when the supervising node forbids external editing.
    """
    try:
        allow = 1
        try:
            allow = self.node.allow_external_table_editor
        except:
            pass  # attribute is optional; default to allowing edits
        if allow:
            self.ourBDTentry = None
            self.entries = WBDT_message.entries
            answer = Result(0x0000)  # default: everything was ok
            try:
                if self.node:  # persist the data
                    self.node.save_table(self.as_text())
            except:
                # Persistence failure must not disturb the response code.
                msglog.exception()
        else:
            raise EPermission(
                'Supervising node: %s will not allow external editor'
                % self.node.name)
    except:
        msglog.exception()
        answer = Result(0x0010)  # nak
    return answer
def find_template_named(self, name):
    """Resolve '../jobname/definitionname' to a template node.

    Returns the node, or None if the name doesn't match the expected
    form or the template cannot be found.
    """
    if name.find('../') != 0:
        return None
    try:
        print(name)
        n = name.replace('%2F', '/')
        x, a, t = n.split('/', 2)
        print('%s %s %s' % (x, a, t))
        if self.has_child(a):
            print('%s %s' % ('found: ', a))
            n = self.get_child(a)
            print('%s %s' % ('node: ', n.as_node_url()))
            answer = n.find_template_named(t)
            if answer:
                print('%s %s' % ('template: ', answer.as_node_url()))
            else:
                print('%s %s' % ('template not found: ', str(t)))
            return answer
        # TODO: tie in rznet peer apps here -- search the com ports for a
        # matching rznet peer app and form the template URL from it.
    except:
        msglog.exception()
    return None
def prune_orphaned_schedules(self):
    """Remove RZSched_* schedules under /services/time/local/TIM whose
    owning application is no longer one of our children."""
    manager = as_node('/services/time/local')
    if manager.has_child('TIM'):
        try:
            sh = as_node('/services/time/local/TIM')
            name_header = 'RZSched_'
            # Schedules created by us all carry the RZSched_ name prefix.
            schedules = [k for k in sh.children_names()
                         if k[:len(name_header)] == name_header]
            # The app name sits between the first '_' and the first ':';
            # orphans are schedules whose app is no longer a child of ours.
            children = self.children_names()
            orphans = [k for k in schedules
                       if k.split('_')[1].split(':')[0] not in children]
            for o in orphans:
                try:
                    sh.get_child(o).prune()
                    msglog.log('Graphical Control:',
                               'pruned orphaned schedule: ', o)
                except:
                    msglog.exception()
            if len(orphans):
                sh.save_schedule()
        except:
            msglog.exception()
def update(self):
    """Poll the subscription for a (value, timestamp) sample, feed it to
    the history window, then reschedule the next update."""
    try:
        value = ts = None
        result = SM.poll_all(self._sid)
        if result is not None:
            value = result[self._nid]['value']
            ts = result[self._nid]['timestamp']
        self._history_lock.acquire()
        try:
            if value is None or isinstance(value, MpxException):
                # Collection failed this period; the calculation cannot
                # proceed, so discard the accumulated history.
                self._history.clear()
                if not self._poll_failure:
                    # Log the failure once -- don't spam the msglog.
                    self._poll_failure = True
                    msglog.log('Kwh2Kw', msglog.types.WARN,
                               'Failed to retrieve data from %s'
                               % self.link)
            else:
                self._poll_failure = False
                self._history.add(value, ts)
        finally:
            self._history_lock.release()
    except:
        msglog.exception()
    self._scheduled = scheduler.seconds_from_now_do(self.sample_period,
                                                    self.run_update)
    return
def save_trends(self, trend_list):
    """Add and persist trend configurations.

    For each requested point: if the same domain is already trended at a
    different period, the old configuration is dropped; a new trend is
    then created and recorded for any domain not already covered.
    """
    for point in reversed(trend_list):
        point_period = point['frequency']
        point_domain = point['domain']
        # Drop an existing configuration for this domain when its period
        # differs from the requested one.
        for saved_domain, saved_period in tuple(self._pdo.trends.items()):
            if saved_domain == point_domain:
                if saved_period != point_period:
                    self.delete_trend_configuration(saved_domain)
                break
        if point_domain not in self._pdo.trends:
            # Not trended yet: configure and start a new trend.
            try:
                domain_node = as_node(point_domain)
                if isinstance(domain_node,
                              (EnergywiseSwitch, EnergywiseDomain)):
                    self.add_trend_configuration(point_period,
                                                 point_domain)
                    domain_node.new_trend(point_period)
            except Exception:
                msglog.exception()
                msglog.log("Energywise", msglog.types.ERR,
                           "Failed to create trend for %r every %r seconds"
                           % (point_domain, point_period))
    return
def close_channels(self):
    """Close and discard every channel this instance is tracking."""
    while self.channels:
        channel = self.channels.pop()
        try:
            channel.close()
        except:
            # A channel that fails to close is still discarded.
            msglog.exception(prefix="handled")
def completion_handler(self, cb):
    """Invoke the registered callback (if any) with this instance,
    logging rather than propagating any failure."""
    try:
        if self._callback:
            self._callback(self)
    except:
        msglog.exception(prefix='Handled')
def get(self, manager, **keywords):
    """Batch-read this group's properties via BACnet ReadPropertyMultiple,
    disabling batching for these points when RPM fails."""
    if debug:
        print('Start of Batch:%s' % (self.properties))
    now = time.time()
    self.last_get_time = now
    callback = None
    if 'callback' in keywords:
        keywords['callback'].callback(self.rpm_callback)
    try:
        rars = rpm(self.device, self.properties, self.timeout, **keywords)
        if isinstance(rars, Callback):
            return rars  # callback mode: result is delivered async
    except BACnetException as e:
        if len(self.ids) == 1:
            # Single-property rpms throw exceptions at this level;
            # simply return the exception as the result.
            return {self.ids[0]: e}
        self.total_rpm_error_counter += 1
        # NOTE(review): this threshold fires on the very first error,
        # making the fallback below unreachable -- confirm whether a
        # higher limit was intended.
        if self.total_rpm_error_counter > 0:  # catch runaway thrashing
            for id in self.ids:
                nr = self.map[id]
                nr.set_batch_manager(None)  # these points cannot batch
            raise EBadBatch('bacnet', self, 'error threshold exceeded')
        # At this point this is a multiple-property read that failed.
        msglog.exception()
        if debug:
            print('%s %s' % ('rpm failed, trying rp',
                             str(self.properties)))
        answer = {}
        for id in self.ids:
            nr = self.map[id]
            result = nr.get_result()
            answer[id] = result
            if isinstance(result.value.value, BACnetError):
                # Turn off rpm for the offending property.
                nr.set_batch_manager(None)
                msglog.log('bacnet', nr.as_node_url(),
                           'cannot get multiple')
                if debug:
                    msglog.exception()
        raise EBadBatch('bacnet', self, 'rpm failed')
def cancel(self):
    """Close this handler's channel, logging (never raising) failures."""
    msglog.log('broadway', WARN, '%r closing channel' % self)
    try:
        self.channel.close()
    except:
        msglog.exception(prefix='Handled')
def scan_nodes_for_changes(self):
    """Poll subscribed nodes and push changed values into the sheet."""
    sub_dict = SM.poll_all(self.sid)
    self.node_values = sub_dict
    print('%s %s' % ('scan_nodes_for_changes sm:', sub_dict))
    if sub_dict:  # there have been changes
        # Compare each changed value against the sheet; update on diff.
        for row_index in sub_dict.keys():
            try:
                url, mode, value = self.sheet[row_index]
                new_value = sub_dict[row_index]['value']
                if mode < 3:  # not write-only
                    update = 0
                    print('%s %s %s' % ("compare: ", str(new_value), value))
                    if str(new_value) != value:
                        update = 1
                        try:
                            # String compare differed; also try a numeric
                            # comparison before declaring a change.
                            if new_value == eval(value):
                                update = 0
                        except:
                            pass  # text words won't eval
                    if update:
                        print('%s %s' % ('scan_nodes_for_changes set_cell:',
                                         row_index))
                        self.set_cell(row_index, 3, new_value)
            except:
                msglog.exception()
def _alarm_text(self, alarm_dict): result = "N/A" try: result = alarm_dict['data'] except: msglog.exception() return result
def stop(self):
    """Disable BBMD on our network and drop the persistent table, then
    complete normal CompositeNode shutdown."""
    try:
        disable_bbmd(self.parent.interface.network)
        self._persistent_table = None
    except:
        msglog.exception()
    CompositeNode.stop(self)
def clear_subscr(self, targets=None):
    """Clear subscriptions on the line handler.

    targets may be None, a list of (lan_address, id_number) 2-tuples,
    or a list of node-URL strings (resolved here to 2-tuples).  URLs
    that fail to resolve are logged and skipped.
    """
    if self.line_handler is None:
        return
    if isinstance(targets, list):
        first = targets[0]
        if isinstance(first, tuple):
            pass  # already in (dev_id, obj_id) form
        elif isinstance(first, str):
            resolved = []
            for node_URL in targets:
                try:
                    node = as_node(node_URL)
                except ENoSuchName:
                    msglog.exception()
                    continue
                resolved.append((node.lan_address, node.id_number,))
            if len(resolved) == 0:
                return  # nothing resolved; nothing to clear
            targets = resolved
        else:
            raise EInvalidValue('targets', targets, 'Should be None, int, '
                                'or list of 2-tuples (dev_id, obj_id) or '
                                'a list of node URL strings.')
    self.line_handler.clear_subscr(targets)
    return
def start(self):
    """Load persisted value-driver dumps and reinstantiate each driver,
    logging (not raising) individual failures."""
    self.security_manager = self.nodespace.as_node(self.security_manager)
    self._pdo_lock.acquire()
    try:
        self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
        self._pdo.valuedrivers = {}
        self._pdo.load()
        valuedriverdumps = self._pdo.valuedrivers.items()
    finally:
        self._pdo_lock.release()
    super(DriverConfigurator, self).start()
    tstart = time.time()
    for drivername, driverdump in valuedriverdumps:
        try:
            # Unpickle and invoke the IPickles factory for this driver.
            IPickles(cPickle.loads(driverdump))()
        except:
            message = self.LOADFAILURE % (self.name, 'Value Driver',
                                          drivername)
            msglog.log('broadway', msglog.types.ERR, message)
            msglog.exception(prefix='Handled')
    tend = time.time()
    tlapse = tend - tstart
    msglog.log('broadway', msglog.types.INFO,
               'Value Driver Configurator loaded '
               '%s nodes in %s seconds.' % (len(valuedriverdumps), tlapse))
    return
def _work(self):
    """Worker loop: for each queued callback URL, compute our current
    value and POST it back to that URL, logging any failure."""
    NETLOC = 1
    PATH = 2
    while 1:
        callback = self._worker_queue.get()
        try:
            result = self.get()
        except:
            msglog.exception()
            result = 'None'
        try:
            data = urllib.urlencode({'result': result})
            p_url = urlparse(callback)
            conn = httplib.HTTPConnection(p_url[NETLOC])
            conn.request('POST', p_url[PATH], data)
            rsp = conn.getresponse()
            msg = '%s sent a response of %s to %s.' % (
                self.as_node_url(), result, callback)
            msglog.log('broadway', msglog.types.INFO, msg)
        except:
            msglog.exception()
    return
def start_tunnel(self):
    """Run the tunnel: build the slave-side serial Port on first use,
    then service connections until deactivated."""
    if self is not currentThread():
        # Called from another thread: ask the tunnel thread to start.
        self._send_cmd('start')
        return
    self._op_lock.acquire()
    try:
        self.is_active = 1
        # Set up a Port object that reads/writes the slave device file
        # of the pseudo-terminal pair.
        if not self._serial_port:
            self._serial_port = Port()
            cfg = self._vcp.configuration()
            cfg['dev'] = self.tty
            cfg['name'] = '_slave'
            cfg['parent'] = self._vcp
            self._serial_port.configure(cfg)
        self._op_lock.release()
    except:
        # NOTE(review): any setup exception is swallowed here; only the
        # lock release is guaranteed.  Confirm this is intentional.
        self._op_lock.release()
    while self.is_active:
        if not self._serial_port.is_open():
            self._serial_port.open()
            self._serial_port.drain()
        try:
            if self.is_server:
                self._do_listen()
            else:
                self._do_connect()
        except:
            msglog.exception()
    if self._serial_port and not self._serial_port.is_open():
        # NOTE(review): closing only when the port is NOT open looks
        # inverted -- confirm against Port.close() semantics.
        self._serial_port.close()
def export(self, alarm, attempt=0):
    """Write *alarm* into the linked log (triggering its exporters),
    queueing or rescheduling when the collector is not ready."""
    self._lock.acquire()
    try:
        if not self._started:
            # start() will call export_waiting_alarm(); no need to
            # schedule anything here.
            self._alarm.append(alarm)
            return
        # Even if this node is started, do not attempt the export unless
        # the linked log node's collector exists and is started.
        if self.log.collector is None:
            self._alarm.append(alarm)
            if self._waiting_alarm_sid is None:
                # Not already scheduled: wait long enough for log.start()
                # to finish creating and starting the collector.
                # ***GUESS*** 10.0 sec; too short manifests as the
                # ENotStarted raised below.
                self._waiting_alarm_sid = scheduler.after(
                    10.0, self.export_waiting_alarm, ())
            return
    finally:
        self._lock.release()
    self.log.collector.pause()
    try:
        try:
            if not self.log.collector.running:
                raise ENotStarted('Collector not started yet.')
            entry = self.log.collector.get_entry()
            entry[self.ts_position] = time.time()
            # Stamp source/message if the target log's columns support it.
            if isinstance(self.trigger_node_url_posn, int):
                entry[self.trigger_node_url_posn] = as_node_url(
                    alarm.source)
            if isinstance(self.trigger_node_msg_posn, int):
                entry[self.trigger_node_msg_posn] = str(alarm)
            self.log.add_entry(entry)
            t = time.time()
            for child in self.log.get_child('exporters').children_nodes():
                child.go(t)  # starts threads for long operations
        except:
            msglog.exception()
            if attempt > alarm.source.send_retries:
                msglog.log('broadway', msglog.types.WARN,
                           'Export of alarm failed, aborting send.')
                raise MpxException('Log and export failed.')
            else:
                msglog.log('broadway', msglog.types.WARN,
                           'Log on alarm failed, delaying 1.0 sec.')
                self._lock.acquire()
                try:
                    if self._scheduled != None:
                        scheduler.cancel(self._scheduled)
                    self._scheduled = scheduler.after(
                        1, self.export, (alarm, attempt + 1))
                finally:
                    self._lock.release()
    finally:
        self.log.collector.play()
    return
def poll_server(self):
    """Run one poll cycle against the server; the attempt is counted
    even when the request fails."""
    self.debug_message('initiating poll')
    try:
        self._request_data()
    except:
        msglog.exception(prefix="handled")
    self.polls_completed += 1
def export(self, alarm):
    """Format *alarm* as a log entry and transport it, retrying up to
    the source's configured send_retries before giving up."""
    entry = {}
    entry['alarm'] = alarm.source.name
    self._timestamp = alarm.timestamp
    entry['timestamp'] = alarm.timestamp
    entry['critical_value'] = alarm.critical
    values = alarm.values
    for key in values.keys():
        entry[key] = values[key]
    entry['message'] = alarm.message
    if hasattr(alarm, 'subject'):
        entry['subject'] = alarm.subject
    data = self.formatter.format([entry])
    tries = alarm.source.send_retries + 1
    while tries:
        tries -= 1
        try:
            # Only transport while holding the shared connection.
            if self.connection.acquire(self.timeout):
                try:
                    self.transporter.transport(data)
                    return
                finally:
                    self.connection.release()
        except:
            msglog.log('broadway', msglog.types.WARN,
                       'Failed attempt to send alarm %s' % alarm)
            msglog.exception()
    else:
        # All retries exhausted without a successful transport.
        raise MpxException('Export failed.')
def _tick(self):
    """Execute all due entries (condition released while running), then
    requeue recurring/premature ones.

    Returns seconds until the next run, or None when nothing is queued.
    """
    reschedule = []
    entries = self._entries.popentries(uptime.secs())
    self._condition.release()
    try:
        for entry in entries:
            try:
                entry.execute()
            except EActionExpired:
                pass  # too late to run; drop silently
            except EActionPremature:
                message = 'entry %s execution premature, will reschedule'
                self.debugout(message, STANDARDDEBUG, entry)
                reschedule.append(entry)
            except:
                msglog.exception()
            else:
                if isinstance(entry, RecurringEntry):
                    reschedule.append(entry)
    finally:
        self._condition.acquire()
    for entry in reschedule:
        entry.computewhen()
    self._entries.addentries(reschedule)
    nextrun = self._entries.nextruntime()
    if nextrun is not None:
        nextrun = max(0, nextrun - uptime.secs())
    return nextrun
def handle_error(self):
    """Log the channel error (with a protocol-mismatch hint) and always
    close the connection."""
    try:
        msglog.warn("%s handling error. Please check whether the peer is up and supports the %s protocol" % (self, self.protocol))
        msglog.exception(prefix="handled")
        msglog.inform("%s closing due to error." % self)
    finally:
        self.close()
def completion_handler(self, cb):
    """Invoke the registered callback (if any) with this instance,
    logging rather than propagating any failure."""
    try:
        if self._callback:
            self._callback(self)
    except:
        msglog.exception(prefix='Handled')
def handle_request(self, request):
    """Handle a posted Brivo XML-RPC request.

    NOTE(review): as written, the method returns immediately after
    decoding the payload, and the reply path below references an
    undefined `response` variable -- this block looks unfinished;
    confirm the intended behavior before relying on it.
    """
    print('handle_request')
    if not self.running:
        return request.error(503)  # service unavailable
    try:
        self.data = data = request.get_data().read_all()
        if not data:
            raise EProtocol('could not get DATA parameter from posted data')
        # Decode the XML-RPC payload into (params, methodname).
        params, method = xmlrpclib.loads(data)
        return
        # Unreachable: object alias and method name were to be parsed
        # here (colon-delimited).
        object_alias = ''
        method_name = ''
    except:
        msglog.exception()
        raise MpxException(
            'Error occurred while processing Brivo XMLRPC command')
    # XML-RPC call was successful: send the XML result back to client.
    reply = Response(request)
    reply.set_header('Content-Length', len(response))
    reply.set_header('Content-Type', 'text/xml')
    reply.send(response)
    return
def update(self, changes): for nid, result in changes.items(): try: self.get_child(nid).update(result) except: msglog.exception() return len(changes)
def start(self):
    """Locate our FileSpace and the Security Manager and decide whether
    this PSP handler runs in secured mode."""
    RequestHandler.start(self)
    self.WEB_ROOT = properties.HTTP_ROOT
    if self.parent.server_type == 'HTTPS':
        self.WEB_ROOT = properties.HTTPS_ROOT
    for handler in self.parent.children_nodes():
        if isinstance(handler, FileSpace):
            self.filespace = handler
            break
    err_message = ('PSP handler not running in secured mode because '
                   'no %s was found. Config parameter "secured" '
                   'being overriden to False')
    if self.secured and self.filespace:
        try:
            sm = as_node('/services/Security Manager')
        except KeyError:
            msglog.log('broadway', msglog.types.WARN,
                       err_message % 'Security Manager')
            msglog.exception(prefix='Handled')
            self.provides_security = self._secured = False
        else:
            self.security_manager = sm
            self.provides_security = self._secured = True
    else:
        if self.secured:
            # Not secured because we could not locate a filespace object.
            msglog.log('broadway', msglog.types.WARN,
                       err_message % 'FileSpace manager')
        self.provides_security = self._secured = False
def update(self):
    """Poll the subscription for a (value, timestamp) sample, feed it to
    the history window, then reschedule the next update."""
    try:
        value = ts = None
        result = SM.poll_all(self._sid)
        if result is not None:
            value = result[self._nid]['value']
            ts = result[self._nid]['timestamp']
        self._history_lock.acquire()
        try:
            if value is None or isinstance(value, MpxException):
                # Collection failed this period; the calculation cannot
                # proceed, so discard the accumulated history.
                self._history.clear()
                if not self._poll_failure:
                    # Log the failure once -- don't spam the msglog.
                    self._poll_failure = True
                    msglog.log('Kwh2Kw', msglog.types.WARN,
                               'Failed to retrieve data from %s'
                               % self.link)
            else:
                self._poll_failure = False
                self._history.add(value, ts)
        finally:
            self._history_lock.release()
    except:
        msglog.exception()
    self._scheduled = scheduler.seconds_from_now_do(self.sample_period,
                                                    self.run_update)
    return
def login(self):
    """Perform the initial CPC handshake sequence.

    Returns 1 on success, 0 on failure (failure is logged).
    """
    if self.debug:
        print('start login')
    try:
        self.drain_socket()
        if self.debug:
            print('write device list request')
        self.write(DeviceListRequest())
        self._device_list = DeviceListResponse(self.read())
        self.device_number = self._device_list.units()[self.device_name]
        if self.debug:
            print('write login request')
        self.write(LoginRequest(self.device_number, self.user,
                                self.password))
        self._login_response = LoginResponse(self.read())
        if self.debug:
            print('write request 3')
        self.write(Request3(self.device_number))
        self._response3 = Response3(self.read())
        if self.debug:
            print('write request 4')
        self.write(Request4(self.device_number))
        self._response4 = Response4(self.read())
        if self.debug:
            print('write request 5')
        self.write(Request5(self.device_number))
        # May receive the first screen update before the response.
        self._response5 = self.poll_for_incomming_packets()
        if self.debug:
            print('login successful')
        # Drain the first screen completely before moving to new pages.
        for i in range(10):
            self.poll_for_incomming_packets()
        self.send_key('f9')
        for i in range(10):
            self.poll_for_incomming_packets()
        return 1
    except:
        print('CPC login failed')
        msglog.exception()
        return 0
def confirmed_service_indication(network, the_device, msg):
    """Dispatch a confirmed BACnet service request to the matching
    server handler, building a reject PDU on any failure.

    NOTE(review): on the exception path `response` is assigned but never
    sent or returned from this function -- confirm how the reply reaches
    the network layer.
    """
    import _bacnet
    try:
        if msg.choice == BACNET_READ_PROPERTY:
            response = _bacnet.server_read_property(the_device.node, msg)
        elif msg.choice == BACNET_READ_PROPERTY_MULTIPLE:
            response = _bacnet.server_read_property_multiple(
                the_device.node, msg)
        elif msg.choice == BACNET_WRITE_PROPERTY:
            response = _bacnet.server_write_property(the_device.node, msg)
        elif msg.choice == BACNET_WRITE_PROPERTY_MULTIPLE:
            response = _bacnet.server_write_property_multiple(
                the_device.node, msg)
        elif msg.choice == BACNET_CONFIRMED_COV_NOTIFICATION:
            response = server_cov_notification_msg(the_device.node, msg,
                                                   confirmed=True)
        else:
            # Reject anything else.
            raise ENotImplemented('bacnet server', msg.choice,
                                  'service choice not supported')
        if response and (not _is_master_server(the_device, network)):
            # Slave responses carry source-routing information.
            response.sspec = 1
            response.slen = 6  # what should we chose?
            response.sadr = utils.bytes_as_string_of_hex_values(
                the_device.instance_number, response.slen)
            response.snet = the_device.network  # correct local copy?
    except Exception as e:
        msglog.exception()
        rp = npdu.NPDU()
        rp.pdu_type = BACNET_REJECT_PDU
        rp.version = 1
        rp.invoke_id = msg.invoke_id
        rp.reason = 9  # Unsupported-Service
        response = rp
def logout(self):
    """Send the combined sign-off packets and read the acknowledgement;
    any failure is logged, not raised."""
    try:
        # Combine both sign-off packets into a single write.
        packets = (str(SignOffRequest1(self.device_number)) +
                   str(SignOffRequest2(self.device_number, self.user)))
        self.write(packets)
        self._device_list = SignOffResponse(self.read())
    except:
        msglog.exception()
def _stop(self):
    """Stop the RNA service: flip state to HALTING, then poke the
    listener awake with a bogus request so it notices the change.

    Returns 1 on success, 0 on failure.
    """
    while self.state is self.PENDING:
        pause(.1)
    if self.state is not self.STOPPED:
        self.state = self.HALTING
        msg = 'RNA service stopping on %s.'
        msglog.log('broadway', msglog.types.INFO, msg % self.transport)
        try:
            # Hack to wake up the thread...
            t = self.transportClass(**self.configuration())
            # TODO: THIS CANNOT WORK.  Neither SimpleTcpService nor
            # SrnaService has a connect() method:
            t.connect()
            i = mpx.lib.rna._InvokeCommand("BOGUS")
            i.pack(ProtocolCommand('/', 'no_such_method_i_hope', ()))
            i.totransport(t.send)
            # TODO: THIS CANNOT WORK.  Neither SimpleTcpService nor
            # SrnaService has a disconnect() method:
            t.disconnect()
            while self.state is not self.STOPPED:
                pause(.1)
            return 1
        except:
            msglog.exception()
            return 0
def save_trends(self, trend_list):
    """Add and persist trend configurations.

    For each requested point: if the same domain is already trended at a
    different period, the old configuration is dropped; a new trend is
    then created and recorded for any domain not already covered.
    """
    for point in reversed(trend_list):
        point_period = point['frequency']
        point_domain = point['domain']
        # Drop an existing configuration for this domain when its period
        # differs from the requested one.
        for saved_domain, saved_period in tuple(self._pdo.trends.items()):
            if saved_domain == point_domain:
                if saved_period != point_period:
                    self.delete_trend_configuration(saved_domain)
                break
        if point_domain not in self._pdo.trends:
            # Not trended yet: configure and start a new trend.
            try:
                domain_node = as_node(point_domain)
                if isinstance(domain_node,
                              (EnergywiseSwitch, EnergywiseDomain)):
                    self.add_trend_configuration(point_period,
                                                 point_domain)
                    domain_node.new_trend(point_period)
            except Exception:
                msglog.exception()
                msglog.log("Energywise", msglog.types.ERR,
                           "Failed to create trend for %r every %r seconds"
                           % (point_domain, point_period))
    return
def _alarm_type(self, alarm_dict): result = "N/A" try: result = alarm_dict['type'] except: msglog.exception() return result
def _inform_garbage_collector(list):
    """Hand the list of nodes that failed to configure to the garbage
    collector service (best effort; failures are only logged)."""
    # NOTE(review): msglog.exception() receives msglog.types.INFO here
    # where other call sites pass a prefix string -- confirm intended.
    try:
        gc = as_internal_node('/services/garbage_collector')
        gc.set_faillist(list)
    except:
        msglog.exception(msglog.types.INFO)
    return
def _proxy_start_active_mode(self):
    """Start active-proxy operation by creating the delivered
    subscription; on failure, reschedule with a widening backoff."""
    if self.link:
        try:
            if self._proxy_sid is None:  # subscription not started yet
                if self.proxy_direction == GET_ONLY:
                    self._proxy_active_source = self._proxy_linked_node()
                    if self._proxy_active_source is None:
                        raise ENotStarted()
                    self._proxy_active_destination = self
                else:  # SET_ONLY
                    self._proxy_active_source = self
                    self._proxy_active_destination = \
                        self._proxy_linked_node()
                    if self._proxy_active_destination is None:
                        raise ENotStarted()
                self._proxy_active_queue = Queue()
                self._proxy_sid = SM.create_delivered(
                    self, {1: self._proxy_active_source})
                if self.debug:
                    print('Active proxy %s started successfully'
                          % self.name)
        except:
            # Start failed.  Widen the retry window (capped at 90s) and
            # schedule another attempt at a randomized delay; deliberately
            # not re-raising (would only restart the scheduler thread).
            if self._retry_win_high < 90:
                self._retry_win_high += 1
            retry_in = randint(int(self._retry_win_high * .66),
                               self._retry_win_high)
            scheduler.seconds_from_now_do(retry_in,
                                          self._proxy_start_active_mode)
            if self.debug:
                msglog.exception()
def _configure(filename=properties.CONFIGURATION_FILE):
    """Build and start the node tree from the configuration source,
    falling back to a minimal configuration on failure.

    Returns the started root node.
    """
    faillist = []
    try:
        if megatron:
            megatron.setConfiguring()
        # A .db_config marker selects database-backed configuration.
        if os.path.exists('/home/mpxadmin/.db_config'):
            _configure_from_db(filename, faillist)
        else:
            _configure_file(filename, faillist)
    except:
        print('%s %s' % ('***** Exception during _configure_file',
                         sys.exc_info()))
        msglog.exception(msglog.types.ERR)
        # Ensure that the system has the minimum functional requirements.
        print('*** Performing minimal configuration ***')
        ensure_minimal_configuration()
    configuration_file = filename
    root = mpx.lib.node.as_internal_node('/')
    # Start all nodes, top down, breadth first.
    if megatron:
        megatron.setStarting()
    root.start(stage=6)
    # Inform the garbage_collector of any nodes which failed.
    _inform_garbage_collector(faillist)
    if megatron:
        megatron.setRunning()
    return root
def discover_name(self, name, **options):
    """Run discovery (when *options* allow it); a discovery failure is
    logged and reported to the caller as ENoSuchName(name)."""
    if self._should_discover(**options):
        try:
            self.discover()
        except Exception:
            msglog.exception()
            raise ENoSuchName(name)
def start(self):
    """Prepare the Delphi property for polling: clean stale event files,
    find the status child, normalize the interface address, and start
    the poll scheduler."""
    msglog.log('Delphi', INFO, "Property Start")
    if self.parent.enabled:
        self.local_file_path = '/tmp/' + self.name + '_EVENTS.NSS'
        # Remove any stale working files left from a previous run.
        if exists(self.local_file_path):
            remove(self.local_file_path)
        if exists(self.local_file_path + '_bak'):
            remove(self.local_file_path + '_bak')
        if exists(self.local_file_path + '_prevday'):
            remove(self.local_file_path + '_prevday')
        for child in self.children_nodes():
            if child.identity == 'status':
                self.downloadStatus = child
                break
        if self.interface_pc_addr == None or \
           self.event_file_location == None:
            msglog.log('Delphi', WARN,
                       "Check interface pc address and event file location configurations")
        else:
            # Strip a leading "<protocol>://" from the configured address.
            protocol = self.communication_interface + '://'
            if self.interface_pc_addr[:len(protocol)] == protocol:
                self.interface_pc_addr = \
                    self.interface_pc_addr[len(protocol):]
            if not self.scheduler.is_started():
                try:
                    self.scheduler.start()
                    # Kick off the first poll in 5 seconds.
                    self.schedulePollAfterInterval(5)
                except:
                    msglog.exception()
    super(Property, self).start()
def handle_update_password(self, request): parameters = request.get_query_dictionary() parameters.update(request.get_post_data()) # POST value for destination overrides query-string if both provided. username = parameters.get("username", "") password = parameters.get("password", "") newpwd = parameters.get("newpwd", "") confirm_newpwd = parameters.get("confirm_newpwd", "") try: if Properties.get_boolean('PAM_ENABLE'): user = self.manager.user_from_pam(username, password) else: user = self.manager.user_from_cleartext(username, password) except (PAMError, EAuthenticationFailed): msglog.exception(prefix="Handled") msg = "Invalid old password. Enter correct old password for authentication." self.handle_form(request, "update_password", "Authentication Failed", msg) else: if newpwd == confirm_newpwd: try: user.set_password(newpwd) except EInvalidValue, e: msglog.exception(prefix="Handled") msg = "".join(e[1].splitlines()) self.handle_form(request, "update_password", "Invalid new password. Please try again", msg) else: msg = "Password updated successfully for %s." % (username) msglog.log("broadway", msglog.types.INFO, msg) self.handle_form(request, "information", msg) else:
def configure_node(self, nodeurl, config):
    """Reconfigure a managed node, rolling back to its previous
    configuration if the new one fails; the node is always restarted.

    Returns the node's resulting configuration.
    """
    node = self.get_managed_node(nodeurl)
    node.stop()
    try:
        node.configure(config)
    except:
        msglog.log("broadway", msglog.types.WARN,
                   "Error prevented reconfiguration of node: %s" % node)
        msglog.exception(prefix="handled")
        msglog.log("broadway", msglog.types.WARN,
                   "Rolling back configuration.")
        # Restore the last known-good configuration.
        try:
            node.configure(self.nodes[nodeurl])
        except:
            msglog.log("broadway", msglog.types.WARN,
                       "Configuration rollback failed.")
            msglog.exception(prefix="handled")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "Rollback of configuration succeeded.")
    else:
        msglog.log("broadway", msglog.types.INFO,
                   "Node reconfigured: %s" % node)
        self.updatepdo(nodeurl, node)
    finally:
        node.start()
    return node.configuration()
def _stop(self):
    """Stop the RNA service: flip state to HALTING, then poke the
    listener awake with a bogus request so it notices the change.

    Returns 1 on success, 0 on failure.
    """
    while self.state is self.PENDING:
        pause(.1)
    if self.state is not self.STOPPED:
        self.state = self.HALTING
        msg = 'RNA service stopping on %s.'
        msglog.log('broadway', msglog.types.INFO, msg % self.transport)
        try:
            # Hack to wake up the thread...
            t = self.transportClass(**self.configuration())
            # TODO: THIS CANNOT WORK.  Neither SimpleTcpService nor
            # SrnaService has a connect() method:
            t.connect()
            i = mpx.lib.rna._InvokeCommand("BOGUS")
            i.pack(ProtocolCommand('/', 'no_such_method_i_hope', ()))
            i.totransport(t.send)
            # TODO: THIS CANNOT WORK.  Neither SimpleTcpService nor
            # SrnaService has a disconnect() method:
            t.disconnect()
            while self.state is not self.STOPPED:
                pause(.1)
            return 1
        except:
            msglog.exception()
            return 0