def _read(self): dallas = self.parent dallas.reset() dallas.matchrom(self.address) dallas.readscratch() result = dallas.readbytes(9) if crc_of(result) == 0: t = unpack('<h', result[:2])[0] self.result = t if self.bad_crc_count > 10: #was logged as bad msglog.log('DallasBus', 'information', 'Resumed: %s' % \ as_node_url(self)) self.bad_crc_count = 0 t = uptime_secs() self.scan_period = t - self._last_read_time self._last_read_time = t if self.debug: print "TEMPERATURE SENSOR: _read value", self.result else: if self.bad_crc_count < 11: self.bad_crc_count += 1 if self.bad_crc_count == 10: self.result = None #return ETimeout to gets raise EInvalidResponse('Failed read ten times: %s' % \ as_node_url(self)) if self.debug: print "TEMPERATURE SENSOR: BAD CRC", result return
def stop(self):
    """Release live node references before stopping.

    Unregisters the dispatcher subscription and replaces the manager
    and security-manager node references with their URL strings, then
    delegates to the superclass stop.
    """
    manager = self.manager
    if not isinstance(manager, str):
        manager.dispatcher.unregister(self.sub)
        self.manager = as_node_url(manager)
    secmgr = self.security_manager
    if not isinstance(secmgr, str):
        self.security_manager = as_node_url(secmgr)
    return super(CloudConfigurator, self).stop()
def treenode(node, children=True):
    """Return a dictionary description of *node* for tree displays.

    The description carries the node's name and URL.  When *children*
    is true it also includes a (possibly empty) list of child
    references for nodes that expose children_nodes().
    """
    description = {"name": node.name, "path": as_node_url(node)}
    if children:
        refs = []
        if hasattr(node, "children_nodes"):
            for child in node.children_nodes():
                refs.append({"_reference": as_node_url(child)})
        description["children"] = refs
    return description
def _setup(self):
    """Build the Circuits/Racks service subtree from the CPC devices.

    Walks every device below the CPC UHP node, mirrors each circuit
    and its case-temperature sensors as translator/service nodes, and
    records bookkeeping rows in self._cases and self._racks.
    """
    ckts_svc_node = self.get_child('Circuits')
    racks_svc_node = self.get_child('Racks')
    has_CircuitStatus_node = 0
    for dev_node in self._cpc_uhp_node.children_nodes():
        ckt_svc_nodes = []
        if dev_node.has_child('Circuit'):
            ckts_dev_node = dev_node.get_child('Circuit')
            for ckt_dev_node in ckts_dev_node.children_nodes():
                ckt_name = ckt_dev_node.get_child('Name').get()
                ckt_svc_node = CktStatusXltrNode(ckt_dev_node)
                ckt_svc_node.configure({'parent': ckts_svc_node,
                                        'name': ckt_dev_node.name})
                ckt_svc_nodes.append(ckt_svc_node)
                num_temps = ckt_dev_node.get_child(
                    'NumberOfTempSensors').get()
                # Fall back to six sensors when the count is unknown.
                if (num_temps is None) or isinstance(num_temps, Exception):
                    num_temps = 6
                for i in range(num_temps):
                    case_temp_dev_node = ckt_dev_node.get_child(
                        'CaseTemps' + str(i))
                    case_temp_svc_node = CompositeNode()
                    case_temp_svc_node.configure(
                        {'parent': ckt_svc_node,
                         'name': str(case_temp_dev_node._obj_inst_num)})
                    status_svc_node = CktCaseTempsStatusXltrNode(
                        ckt_svc_node, case_temp_dev_node)
                    status_svc_node.configure(
                        {'parent': case_temp_svc_node, 'name': 'Status'})
                    descr_svc_node = SingleAttrNode('Not initialized')
                    descr_svc_node.configure(
                        {'parent': case_temp_svc_node,
                         'name': 'Description'})
                    self._cases.append(
                        [ckt_name,
                         case_temp_dev_node._obj_inst_num,
                         as_node_url(case_temp_dev_node),
                         as_node_url(status_svc_node),
                         as_node_url(descr_svc_node)])
        rack_svc_node = CompositeNode()
        rack_svc_node.configure({'parent': racks_svc_node,
                                 'name': dev_node.name})
        rack_status_node = RackStatusXltrNode(dev_node, ckt_svc_nodes)
        rack_status_node.configure({'parent': rack_svc_node,
                                    'name': 'Status'})
        self._racks.append([dev_node._dev.get_name(),
                            as_node_url(rack_status_node)])
    return
def __init__(self, node, auto_load=0, lock=None, dmtype=None, **keywords):
    """Bind persistent storage to *node*.

    node      -- a node object, or a node URL string, naming the owner.
    auto_load -- when true, load persisted values immediately.
    lock      -- optional lock shared with the storage layer.
    dmtype    -- data-manager type registered with the Garbage Collector.
    Keyword 'hash_it' (default 1) selects hashed file naming; when
    false the URL itself is recorded as the file name in the metadata.
    Keyword 'path' optionally overrides the storage directory.
    """
    # isinstance() replaces the deprecated type(x) == types.StringType
    # comparison; 'in' replaces deprecated dict.has_key().
    if isinstance(node, str):
        name = node
    else:
        name = as_node_url(node)
    hash_it = keywords['hash_it'] if 'hash_it' in keywords else 1
    if hash_it:
        self.__meta__ = {'name': name}
    else:
        self.__meta__ = {'name': None, 'file': name}
    self.__path = keywords.get('path')
    self.__name = name
    self.__lock = lock
    self.__hash_it = hash_it
    self._persistent = _PersistentStorage(name, lock, hash_it,
                                          context=self.global_context(),
                                          path=self.__path)
    # If we are persisting data for a node, then register the relevant
    # information with the Garbage Collector.
    self.__deferred_register = None
    self.__register(name, self._persistent.filename, dmtype)
    self._loaded = []
    self._delete = []
    if auto_load:
        self.load()
    return
def add_target(self, target):
    """Register *target* (node or URL) as a trigger target.

    Returns True when the target was added, False when it was already
    present.  Targets that cannot currently be resolved are still
    recorded by URL and logged as unresolvable.  The root URL "/" is
    rejected with ValueError.
    """
    if isinstance(target, str):
        targeturl = target
    else:
        targeturl = as_node_url(target)
    try:
        target = self.nodespace.as_node(targeturl)
    except KeyError:
        target = None
    if targeturl == "/":
        raise ValueError("Invalid trigger target: %r" % target)
    self.synclock.acquire()
    try:
        if targeturl in self.targets:
            added = False
            message = "Trigger %r not adding target %r: already exists."
            msglog.warn(message % (self.name, targeturl))
        else:
            self.targets.add(targeturl)
            if target:
                self.targetmap[targeturl] = target
            else:
                message = "Trigger %r added unresolvable target: %r"
                msglog.warn(message % (self.name, targeturl))
            added = True
    finally:
        self.synclock.release()
    return added
def _setup_options_file(self):
    """Generate the ppp/mgetty configuration files for this port.

    Writes the ppp options file, adds an mgetty stanza for the port
    (modem init chat, or direct-line settings), and regenerates the
    CHAP and PAP secrets files.
    """
    # Create ppp options
    pnode = self._get_port_node()
    comment = '# Script generated by the broadway framework\n' \
              '# Date: %s for %s\n' % (str(time.ctime(time.time())),
                                       as_node_url(self))
    self._gen_ppp_options(comment)
    # mgetty configuration
    mc = MgettyConfig()
    pname = self._get_port_name()
    mstr = ' speed %d\n' % pnode.baud
    if self._is_parent_modem():
        dnode = self._get_device_node()
        mstr += ' init-chat "" \d%s OK\n' % dnode.init_string
    else:
        # NOTE(review): statement grouping reconstructed from a
        # whitespace-mangled source; data-only/toggle-dtr are treated
        # as direct-line settings -- confirm against history.
        mstr += ' direct yes\n'
        mstr += ' data-only yes\n'
        mstr += ' toggle-dtr no\n'
    mstr += ' login-conf-file %s\n' % self.login_config_filename
    mc.addPort(pname, mstr)
    mc.save()
    # Create chap secrets.
    self._gen_chap_secrets()
    # Create pap secrets.
    self._gen_pap_secrets()
def start(self):
    """Build the WS Response subtree and alert attribute nodes.

    Creates (or re-acquires) the 'WS Response' composite with its XML
    and SOAP data children, then ensures 'Start time'/'End time'
    attribute nodes point at the emralert element before delegating to
    the superclass start.
    """
    if self.has_child('WS Response'):
        self.wsresponse = self.get_child('WS Response')
        self.xmlnodes = self.wsresponse.get_child('XML Nodes')
        self.soapnodes = self.wsresponse.get_child('SOAP Nodes')
    else:
        self.wsresponse = CompositeNode()
        self.wsresponse.configure({'parent': self, 'name': 'WS Response'})
        self.xmlnodes = xmldata.XMLDataNode()
        self.xmlnodes.configure({'name': 'XML Nodes',
                                 'parent': self.wsresponse})
        self.soapnodes = soapdata.SOAPDataNode()
        self.soapnodes.configure({'name': 'SOAP Nodes',
                                  'parent': self.wsresponse})
    if not self.has_child('Start time'):
        nodepath = path.join(as_node_url(self.soapnodes),
                             'GetAlertsResult/disp/emralert')
        self.startnode = AttributeNode()
        self.startnode.configure({'name': 'Start time',
                                  'nodeurl': nodepath,
                                  'attribute': 'start',
                                  'parent': self})
        self.endnode = AttributeNode()
        self.endnode.configure({'name': 'End time',
                                'nodeurl': nodepath,
                                'attribute': 'end',
                                'parent': self})
    self.setup()
    super(CNEDispatch, self).start()
def __init__(self, node, auto_load=0, lock=None, dmtype=None, **keywords):
    """Bind persistent storage to *node*.

    node      -- a node object, or a node URL string, naming the owner.
    auto_load -- when true, load persisted values immediately.
    lock      -- optional lock shared with the storage layer.
    dmtype    -- data-manager type registered with the Garbage Collector.
    Keyword 'hash_it' (default 1) selects hashed file naming; when
    false the URL itself is recorded as the file name in the metadata.
    Keyword 'path' optionally overrides the storage directory.
    """
    # isinstance() replaces the deprecated type(x) == types.StringType
    # comparison; 'in' replaces deprecated dict.has_key().
    if isinstance(node, str):
        name = node
    else:
        name = as_node_url(node)
    hash_it = keywords['hash_it'] if 'hash_it' in keywords else 1
    if hash_it:
        self.__meta__ = {'name': name}
    else:
        self.__meta__ = {'name': None, 'file': name}
    self.__path = keywords.get('path')
    self.__name = name
    self.__lock = lock
    self.__hash_it = hash_it
    self._persistent = _PersistentStorage(name, lock, hash_it,
                                          context=self.global_context(),
                                          path=self.__path)
    # If we are persisting data for a node, then register the relevant
    # information with the Garbage Collector.
    self.__deferred_register = None
    self.__register(name, self._persistent.filename, dmtype)
    self._loaded = []
    self._delete = []
    if auto_load:
        self.load()
    return
def _setup_options_file(self):
    """Generate the ppp/mgetty configuration files for this port.

    Writes the ppp options file, adds an mgetty stanza for the port
    (modem init chat, or direct-line settings), and regenerates the
    CHAP and PAP secrets files.
    """
    # Create ppp options
    pnode = self._get_port_node()
    comment = '# Script generated by the broadway framework\n' \
              '# Date: %s for %s\n' % (str(time.ctime(time.time())),
                                       as_node_url(self))
    self._gen_ppp_options(comment)
    # mgetty configuration
    mc = MgettyConfig()
    pname = self._get_port_name()
    mstr = ' speed %d\n' % pnode.baud
    if self._is_parent_modem():
        dnode = self._get_device_node()
        mstr += ' init-chat "" \d%s OK\n' % dnode.init_string
    else:
        # NOTE(review): statement grouping reconstructed from a
        # whitespace-mangled source; data-only/toggle-dtr are treated
        # as direct-line settings -- confirm against history.
        mstr += ' direct yes\n'
        mstr += ' data-only yes\n'
        mstr += ' toggle-dtr no\n'
    mstr += ' login-conf-file %s\n' % self.login_config_filename
    mc.addPort(pname, mstr)
    mc.save()
    # Create chap secrets.
    self._gen_chap_secrets()
    # Create pap secrets.
    self._gen_pap_secrets()
def export(self, alarm, attempt=0):
    """Log *alarm* into the linked log node and run its exporters.

    Queues the alarm (and schedules a retry) when this node or the
    log's collector is not started yet.  On logging failure the export
    is retried after 1 second until alarm.source.send_retries is
    exceeded, at which point MpxException is raised.
    """
    self._lock.acquire()
    try:
        if (not self._started):
            self._alarm.append(alarm)
            # No need to set scheduler here; start() will call
            # export_waiting_alarm()...
            return
        # Even if this node is already started, do not attempt to
        # export alarm unless the linked log node and its collector
        # object are extant and started:
        if (self.log.collector is None):
            self._alarm.append(alarm)
            if (self._waiting_alarm_sid is None):
                # if we're not already scheduled, do it:
                # Need to wait long enough for log.start() to finish creating
                # and starting collector. ***GUESS*** 10.0 sec. Symptom of not
                # waiting long enough: ENotStarted error raised below:
                self._waiting_alarm_sid = scheduler.after(
                    10.0, self.export_waiting_alarm, ())
            return
    finally:
        self._lock.release()
    # Pause collection while we splice our entry into the log.
    self.log.collector.pause()
    try:
        try:
            if not self.log.collector.running:
                raise ENotStarted('Collector not started yet.')
            entry = self.log.collector.get_entry()
            entry[self.ts_position] = time.time()
            # Stamp source, if target log columns support it:
            if isinstance(self.trigger_node_url_posn, int):
                entry[self.trigger_node_url_posn] = as_node_url(
                    alarm.source)
            if isinstance(self.trigger_node_msg_posn, int):
                entry[self.trigger_node_msg_posn] = str(alarm)
            self.log.add_entry(entry)
            t = time.time()
            for child in self.log.get_child('exporters').children_nodes():
                child.go(t)  # starts threads for long ops
        except:
            msglog.exception()
            if attempt > alarm.source.send_retries:
                msglog.log('broadway', msglog.types.WARN,
                           'Export of alarm failed, aborting send.')
                raise MpxException('Log and export failed.')
            else:
                msglog.log('broadway', msglog.types.WARN,
                           'Log on alarm failed, delaying 1.0 sec.')
                self._lock.acquire()
                try:
                    if self._scheduled != None:
                        scheduler.cancel(self._scheduled)
                    self._scheduled = scheduler.after(
                        1, self.export, (alarm, attempt + 1))
                finally:
                    self._lock.release()
    finally:
        self.log.collector.play()
    return
def children_list(self, node):
    """Render each child of *node* as an HTML <li> entry.

    Children exposing configuration() get a node link; gettable
    children also show their current value, wrapped in an override
    link when the child is additionally settable.
    """
    items = []
    nodeobj = mpx.lib.node.as_node(node.as_node_url())
    childnodes = sorted(nodeobj.children_nodes(), cmp=nodecmp)
    for childnode in childnodes:
        content = []
        childname = childnode.name
        childurl = as_node_url(childnode)
        child = node.get_child(childname)
        if hasattr(childnode, 'configuration'):
            line = '<a class="node-link" href="%s%s">%s</a>\n'
            content.append(line % (self.request_path, childurl, childname))
        else:
            content.append(childname)
        # Double has-attribute checks insecured-reference
        # first to bypass unnecessary authorization calls.
        # Note that has-attribute call also authorizes access.
        if hasattr(childnode, "get") and hasattr(child, "get"):
            content.append(" = ")
            if hasattr(childnode, "set") and hasattr(child, "set"):
                html = ('<a href="%s%s?action=get_'
                        'override" class="node_link">')
                content.append(html % (self.request_path, childurl))
                fmt = '%s</a>'
            else:
                fmt = '%s'
            try:
                # Note access to 'get' authorized already.
                value = _htmlescape(self.getvalue(childnode))
            except Exception, error:
                msglog.exception(prefix="handled")
                value = self._get_exception_msg(error)
            content.append(fmt % (value,))
        items.append("<li>%s</li>" % "".join(content))
def start(self):
    """Resolve the configured function expression and begin sampling.

    A function of the literal form as_node('...').get is special-cased:
    the node is resolved up front and values come either from a
    subscription-manager delivery feed or from the bound node method.
    Any other configuration is wrapped in an RFunction for the parent
    log's restricted environment.  A child node exposing evaluate()
    becomes the column's calculator and takes over evaluation.
    """
    Column.start(self)
    if (type(self.__function_config) == types.StringType and
        string.count(self.__function_config, 'as_node') == 1 and
        self.__function_config.endswith('get')):
        func = self.__function_config
        # Extract the node URL between the quotes of as_node('...').
        self.__node = as_node(func[func.find('(') + 2:func.rfind(')') - 1])
        if self.use_subscription_manager:
            self._sid = SM.create_delivered(self,
                                            {1: as_node_url(self.__node)})
            self.function = self.get_last
        else:
            # Bind the trailing attribute name (e.g. 'get') on the node.
            self.function = getattr(self.__node,
                                    func[func.rfind('.') + 1:])
    rexec = self.parent.parent.get_environment()
    self.original_function = RFunction(self.function, args=self.args,
                                       context=self.context, rexec=rexec)
    self.function = self._convert
    self.variables = {}
    nodes = self.children_nodes()
    for potential_calculator in nodes:
        if hasattr(potential_calculator, 'evaluate'):
            if self._calculator:
                # oops
                raise EAttributeError('Too many calculator nodes', self)
            self._calculator = potential_calculator
            self.function = self._evaluate  # hook the calculator in
            self.__original_function = self.original_function
            self.original_function = self.__evaluate_original_function
    self.__started = 1
def start(self):
    """Resolve the configured function expression and begin sampling.

    A function of the literal form as_node('...').get is special-cased:
    the node is resolved up front and values come either from a
    subscription-manager delivery feed or from the bound node method.
    Any other configuration is wrapped in an RFunction for the parent
    log's restricted environment.  A child node exposing evaluate()
    becomes the column's calculator and takes over evaluation.
    """
    Column.start(self)
    if (type(self.__function_config) == types.StringType and
        string.count(self.__function_config, 'as_node') == 1 and
        self.__function_config.endswith('get')):
        func = self.__function_config
        # Extract the node URL between the quotes of as_node('...').
        self.__node = as_node(func[func.find('(') + 2:func.rfind(')') - 1])
        if self.use_subscription_manager:
            self._sid = SM.create_delivered(self,
                                            {1: as_node_url(self.__node)})
            self.function = self.get_last
        else:
            # Bind the trailing attribute name (e.g. 'get') on the node.
            self.function = getattr(self.__node,
                                    func[func.rfind('.') + 1:])
    rexec = self.parent.parent.get_environment()
    self.original_function = RFunction(self.function, args=self.args,
                                       context=self.context, rexec=rexec)
    self.function = self._convert
    self.variables = {}
    nodes = self.children_nodes()
    for potential_calculator in nodes:
        if hasattr(potential_calculator, 'evaluate'):
            if self._calculator:
                # oops
                raise EAttributeError('Too many calculator nodes', self)
            self._calculator = potential_calculator
            self.function = self._evaluate  # hook the calculator in
            self.__original_function = self.original_function
            self.original_function = self.__evaluate_original_function
    self.__started = 1
def __init__(self, node): node_url = as_node_url(node) ImmortalThread.__init__(self, name='_WhoIsThread(%r)' % node_url) self.node = node self.debug = node.debug self.discover_interval = node.discover_interval if self.debug: print '%s.__init__()' % self.getName()
def _setup(self):
    """Build the Circuits/Racks service subtree from the CPC devices.

    Walks every device below the CPC UHP node, mirrors each circuit
    and its case-temperature sensors as translator/service nodes, and
    records bookkeeping rows in self._cases and self._racks.
    """
    ckts_svc_node = self.get_child('Circuits')
    racks_svc_node = self.get_child('Racks')
    has_CircuitStatus_node = 0
    for dev_node in self._cpc_uhp_node.children_nodes():
        ckt_svc_nodes = []
        if dev_node.has_child('Circuit'):
            ckts_dev_node = dev_node.get_child('Circuit')
            for ckt_dev_node in ckts_dev_node.children_nodes():
                ckt_name = ckt_dev_node.get_child('Name').get()
                ckt_svc_node = CktStatusXltrNode(ckt_dev_node)
                ckt_svc_node.configure({'parent': ckts_svc_node,
                                        'name': ckt_dev_node.name})
                ckt_svc_nodes.append(ckt_svc_node)
                num_temps = ckt_dev_node.get_child(
                    'NumberOfTempSensors').get()
                # Fall back to six sensors when the count is unknown.
                if (num_temps is None) or isinstance(num_temps, Exception):
                    num_temps = 6
                for i in range(num_temps):
                    case_temp_dev_node = ckt_dev_node.get_child(
                        'CaseTemps' + str(i))
                    case_temp_svc_node = CompositeNode()
                    case_temp_svc_node.configure(
                        {'parent': ckt_svc_node,
                         'name': str(case_temp_dev_node._obj_inst_num)})
                    status_svc_node = CktCaseTempsStatusXltrNode(
                        ckt_svc_node, case_temp_dev_node)
                    status_svc_node.configure(
                        {'parent': case_temp_svc_node, 'name': 'Status'})
                    descr_svc_node = SingleAttrNode('Not initialized')
                    descr_svc_node.configure(
                        {'parent': case_temp_svc_node,
                         'name': 'Description'})
                    self._cases.append(
                        [ckt_name,
                         case_temp_dev_node._obj_inst_num,
                         as_node_url(case_temp_dev_node),
                         as_node_url(status_svc_node),
                         as_node_url(descr_svc_node)])
        rack_svc_node = CompositeNode()
        rack_svc_node.configure({'parent': racks_svc_node,
                                 'name': dev_node.name})
        rack_status_node = RackStatusXltrNode(dev_node, ckt_svc_nodes)
        rack_status_node.configure({'parent': rack_svc_node,
                                    'name': 'Status'})
        self._racks.append([dev_node._dev.get_name(),
                            as_node_url(rack_status_node)])
    return
def __init__(self, node, *args):
    """Adapt *node* for nodespace access, caching common helpers."""
    self.node = node
    super(NodeSpaceAdapter, self).__init__(*args)
    self.nodetree = {}
    self.url = as_node_url(self.node)
    self.root = as_node('/')
    # Bind the node-resolution helpers onto the adapter so callers can
    # use them without importing mpx.lib.node themselves.
    self.as_node = as_node
    self.as_internal_node = as_internal_node
    self.as_node_url = as_node_url
def export(self, alarm, attempt=0):
    """Log *alarm* into the linked log node and run its exporters.

    Queues the alarm (and schedules a retry) when this node or the
    log's collector is not started yet.  On logging failure the export
    is retried after 1 second until alarm.source.send_retries is
    exceeded, at which point MpxException is raised.
    """
    self._lock.acquire()
    try:
        if (not self._started):
            self._alarm.append(alarm)
            # No need to set scheduler here; start() will call
            # export_waiting_alarm()...
            return
        # Even if this node is already started, do not attempt to
        # export alarm unless the linked log node and its collector
        # object are extant and started:
        if (self.log.collector is None):
            self._alarm.append(alarm)
            if (self._waiting_alarm_sid is None):
                # if we're not already scheduled, do it:
                # Need to wait long enough for log.start() to finish creating
                # and starting collector. ***GUESS*** 10.0 sec. Symptom of not
                # waiting long enough: ENotStarted error raised below:
                self._waiting_alarm_sid = scheduler.after(
                    10.0, self.export_waiting_alarm, ())
            return
    finally:
        self._lock.release()
    # Pause collection while we splice our entry into the log.
    self.log.collector.pause()
    try:
        try:
            if not self.log.collector.running:
                raise ENotStarted('Collector not started yet.')
            entry = self.log.collector.get_entry()
            entry[self.ts_position] = time.time()
            # Stamp source, if target log columns support it:
            if isinstance(self.trigger_node_url_posn, int):
                entry[self.trigger_node_url_posn] = as_node_url(alarm.source)
            if isinstance(self.trigger_node_msg_posn, int):
                entry[self.trigger_node_msg_posn] = str(alarm)
            self.log.add_entry(entry)
            t = time.time()
            for child in self.log.get_child('exporters').children_nodes():
                child.go(t)  # starts threads for long ops
        except:
            msglog.exception()
            if attempt > alarm.source.send_retries:
                msglog.log('broadway', msglog.types.WARN,
                           'Export of alarm failed, aborting send.')
                raise MpxException('Log and export failed.')
            else:
                msglog.log('broadway', msglog.types.WARN,
                           'Log on alarm failed, delaying 1.0 sec.')
                self._lock.acquire()
                try:
                    if self._scheduled != None:
                        scheduler.cancel(self._scheduled)
                    self._scheduled = scheduler.after(1, self.export,
                                                      (alarm, attempt + 1))
                finally:
                    self._lock.release()
    finally:
        self.log.collector.play()
    return
def stop(self):
    """Signal the poll thread to exit and wait up to 30 s for it.

    Logs an error when the thread is still alive after the timeout,
    then delegates to the Client base class.
    """
    self._go = 0
    timeout = 30.0
    self._thread.join(timeout)
    if self._thread.isAlive():
        msglog.log('mpx', msglog.types.ERR,
                   '%s failed to terminate its '
                   '_poll_alarms thread within %s sec.'
                   % (as_node_url(self), timeout))
    Client.stop(self)
    return
def configure(self, config):
    """Configure the trend node.

    Creates a logger alias under the configured parent before applying
    the required 'period' (int) and 'points' (tuple) attributes and
    the optional 'externally_managed' flag (default False).
    """
    name = config.get('name', self.name)
    parent = as_node_url(config.get('parent', self.parent))
    if parent and name:
        self.create_logger_alias(parent, name)
    set_attribute(self, 'period', REQUIRED, config, int)
    set_attribute(self, 'points', REQUIRED, config, tuple)
    set_attribute(self, 'externally_managed', False, config, as_boolean)
    super(Trend, self).configure(config)
    return
def updatepdo(self, nodeurl, node):
    """Update the persisted definition stored for *nodeurl*.

    Any existing entry is removed.  When *node* is truthy it is
    re-resolved, its (factory, configuration) pair is stored under its
    canonical URL, and that URL is returned; a false *node* simply
    deletes the entry and returns the URL unchanged.
    """
    if nodeurl in self.nodes:
        self.nodes.pop(nodeurl)
    if node:
        node = as_node(node)
        nodeurl = as_node_url(node)
        datatype = type(node)
        factory = "%s.%s" % (datatype.__module__, datatype.__name__)
        # Previously configuration() was called twice and the first
        # result bound to an unused local; call it once.
        self.nodes[nodeurl] = (factory, node.configuration())
    return nodeurl
def _dump_cd(n, d=None):
    """Recursively collect configuration dictionaries keyed by URL.

    Walks *n* and its descendants (children visited in sorted name
    order) and returns a dict mapping each node URL to the node's
    configuration().
    """
    if d is None:
        d = {}
    d[as_node_url(n)] = n.configuration()
    if hasattr(n, 'children_nodes'):
        for cname in sorted(n.children_names()):
            _dump_cd(n.get_child(cname), d)
    return d
def trigger_targets(self, name):
    """Return the target URLs of trigger *name* as a list of strings.

    When security is enabled the manager is wrapped as a secured node
    so target access is authorized.
    """
    manager = self.manager
    if self.secured:
        manager = self.security_manager.as_secured_node(manager)
    trigger = manager.get_trigger(name)
    urls = []
    for target in trigger.get_targets(True):
        if isinstance(target, str):
            urls.append(target)
        else:
            urls.append(as_node_url(target))
    return urls
def clear(self, source, *args, **kw):
    """Clear *source* locally, then fan the clear out to all targets.

    Target failures are logged and swallowed so one bad target cannot
    block the rest; the local clear result is returned.
    """
    result = self._source_clear(source, *args, **kw)
    if not isinstance(source, str):
        source = as_node_url(source)
    for target in self.get_targets():
        try:
            target.clear(source, *args, **kw)
        except:
            msglog.warn("%s failed to clear target: %s." % (self, target))
            msglog.exception(prefix="handled")
    return result
def __getstate__(self):
    """Pickle support: capture context type, private vars and config.

    Excluded configuration keys are dropped; the listed private
    attributes are captured directly off the context object.
    """
    nodeurl = as_node_url(self.context)
    configstate = self.context.configuration()
    for varname in self.excludevars:
        # 'in' replaces the deprecated dict.has_key().
        if varname in configstate:
            del configstate[varname]
    privatestate = dict([(varname, getattr(self.context, varname))
                         for varname in self.privatevars])
    return {'type': self.contexttype,
            'state': privatestate,
            'nodeconfig': configstate,
            'nodeurl': nodeurl}
def __init__(self, node, auto_load=0, **keywords):
    """Create persistent storage for *node* (node object or URL).

    Keyword 'hash_it' (default 1) selects hashed file naming.  When
    *auto_load* is true the persisted values are loaded immediately.
    """
    if type(node) == types.StringType:
        name = node
    else:
        name = as_node_url(node)
    hash_it = keywords.get('hash_it', 1)
    self._persistent = _PersistentStorage(name, hash_it)
    self._loaded = []
    self._delete = []
    if auto_load:
        self.load()
def nodecmp(a,b): facade = False #nodecmp over RNA is currently very expensive. Avoid that. if isinstance(a, NodeFacade) or isinstance(b, NodeFacade): a_name = a.as_node_url().split('/')[-1] b_name = b.as_node_url().split('/')[-1] facade = True else: a_name = a.name b_name = b.name if a.hasattr('nodecmp') and not facade: return a.nodecmp(as_node_url(b)) return cmp(a_name, b_name)
def __getstate__(self):
    """Pickle support: capture context type, private vars and config.

    Excluded configuration keys are dropped; the listed private
    attributes are captured directly off the context object.
    """
    nodeurl = as_node_url(self.context)
    configstate = self.context.configuration()
    for varname in self.excludevars:
        # 'in' replaces the deprecated dict.has_key().
        if varname in configstate:
            del configstate[varname]
    privatestate = dict([(varname, getattr(self.context, varname))
                         for varname in self.privatevars])
    return {'type': self.contexttype,
            'state': privatestate,
            'nodeconfig': configstate,
            'nodeurl': nodeurl}
def test_2_anchors_only(self):
    """Verify a minimal config yields only the three anchor nodes."""
    system.configure(os.path.join(properties.ROOT,
                                  'mpx/system/_test_2_anchors_only.xml'))
    root = as_internal_node('/')
    if root.exception is not None:
        args = []
        args.extend(root.exception.args)
        args.append('Unexpected exception instiating anchors only.')
        raise root.exception
    for node in root.children_nodes():
        if node.name not in ('services', 'interfaces', 'aliases'):
            # Raising a bare string is illegal in Python 2.6+; raise a
            # real exception type instead.
            raise AssertionError(
                'Unexpected anchor node %s' % as_node_url(node))
    # Now add on the "required" services.
    system.ensure_minimal_configuration()
    return
def test_2_anchors_only(self):
    """Verify a minimal config yields only the three anchor nodes."""
    system.configure(
        os.path.join(properties.ROOT,
                     'mpx/system/_test_2_anchors_only.xml'))
    root = as_internal_node('/')
    if root.exception is not None:
        args = []
        args.extend(root.exception.args)
        args.append('Unexpected exception instiating anchors only.')
        raise root.exception
    for node in root.children_nodes():
        if node.name not in ('services', 'interfaces', 'aliases'):
            # Raising a bare string is illegal in Python 2.6+; raise a
            # real exception type instead.
            raise AssertionError(
                'Unexpected anchor node %s' % as_node_url(node))
    # Now add on the "required" services.
    system.ensure_minimal_configuration()
    return
def start(self): """Start the memory use test's thread.""" if self._run: print "ERROR: Test already started" return nrt = {} self._nids = [] getable_nodes = all_getable_nodes(self._anchor) for node in getable_nodes: node_url = as_node_url(node) self._nids.append(node_url) nrt[node_url] = node self._nids.sort() self._sid = SUBSCRIPTION_MANAGER.create_polled(nrt) self._run = 1 self._schedule() return
def ConversionFactory(class_name, caller):
    """Instantiate the named raw-value conversion for *caller*.

    class_name is really provided by user via broadway.xml - we don't
    want to just eval it, so it is looked up in an explicit whitelist.
    Unknown names are logged and fall back to the identity conversion.
    """
    conversion_classes = {'LightFromRaw': LightFromRaw,
                          'TempFFromRaw': TempFFromRaw,
                          'TempCFromRaw': TempCFromRaw,
                          'SoilTempFFromRaw': SoilTempFFromRaw,
                          'AccelFromRaw': AccelFromRaw,
                          'ADCPrecisionFromRaw': ADCPrecisionFromRaw,
                          'ADCSingleFromRaw': ADCSingleFromRaw,
                          'BinaryValueFromRaw': BinaryValueFromRaw}
    # dict.get replaces membership testing against .keys(), which built
    # a throwaway list on every call.
    klass = conversion_classes.get(class_name)
    if klass is not None:
        return klass(caller)
    msg = 'could not find conversion class %s for %s' % (class_name,
                                                         as_node_url(caller))
    msglog.log('mpx.xbow', WARN, msg)
    return XbowConversion(caller)
def configure(self, config):
    """Configure the driver loop: debug level, I/O nodes and timing.

    The output node defaults to this node's parent when not
    explicitly configured.
    """
    set_attribute(self, 'debug', self.debug, config, int)
    self._init_debug()
    self.DEBUG3("configure(%r):\n", config)
    ConfigurableNode.configure(self, config)
    self._url = as_node_url(self)
    if self.output is REQUIRED:
        self.output = self.parent
    set_attribute(self, 'output', self.parent, config,
                  self.as_deferred_node)
    set_attribute(self, 'input', REQUIRED, config, self.as_deferred_node)
    set_attribute(self, 'period', self.period, config, float)
    set_attribute(self, 'asyncOK', self.asyncOK, config, int)
    # in cov mode, only changing values are driven.
    set_attribute(self, 'cov_mode', self.cov_mode, config, int)
    set_attribute(self, 'enabled', 1, config, as_boolean)
    set_attribute(self, 'backoff_on_failure', 0, config, as_boolean)
    set_attribute(self, 'conversion', self.conversion, config, str)
    self._period = self.period
    return
def configuration(self):
    """Return this alarm exporter's configuration dictionary.

    Scalar settings are stringified; the formatter and transporter
    report their own configuration when instantiated, otherwise the
    stored configuration dictionaries are copied out.
    """
    config = super(AlarmExporter, self).configuration()
    for attr in ('timeout', 'retries', 'gm_time', 'description'):
        config[attr] = str(self.getattr(attr))
    if self.connection:
        config['connection'] = as_node_url(self.connection)
    config["format"] = self.format
    if self.formatter:
        config['formatter'] = self.formatter.configuration()
    else:
        config["formatter"] = self.format_config.copy()
    config["transport"] = self.transport
    if self.transporter:
        config['transporter'] = self.transporter.configuration()
    else:
        config["transporter"] = self.transport_config.copy()
    return config
def ConversionFactory(class_name, caller):
    """Instantiate the named raw-value conversion for *caller*.

    class_name is really provided by user via broadway.xml - we don't
    want to just eval it, so it is looked up in an explicit whitelist.
    Unknown names are logged and fall back to the identity conversion.
    """
    conversion_classes = {
        'LightFromRaw': LightFromRaw,
        'TempFFromRaw': TempFFromRaw,
        'TempCFromRaw': TempCFromRaw,
        'SoilTempFFromRaw': SoilTempFFromRaw,
        'AccelFromRaw': AccelFromRaw,
        'ADCPrecisionFromRaw': ADCPrecisionFromRaw,
        'ADCSingleFromRaw': ADCSingleFromRaw,
        'BinaryValueFromRaw': BinaryValueFromRaw
    }
    # dict.get replaces membership testing against .keys(), which built
    # a throwaway list on every call.
    klass = conversion_classes.get(class_name)
    if klass is not None:
        return klass(caller)
    msg = 'could not find conversion class %s for %s' % (
        class_name, as_node_url(caller))
    msglog.log('mpx.xbow', WARN, msg)
    return XbowConversion(caller)
def _setup_options_file(self):
    """Generate the outbound ppp configuration for this connection.

    Writes the port-specific chat script and ppp options, regenerates
    the CHAP/PAP secrets files, and refreshes the nameserver list.
    """
    comment = '# Script generated by the broadway framework\n' \
              '# Date: %s for %s\n' % (str(time.ctime(time.time())),
                                       as_node_url(self))
    # An unused local read of properties.MPXINIT_CONF_FILE was removed.
    # Create CHAT file specific to this port.
    self._gen_chat_script(comment)
    # Create ppp options specific to this port.
    self._gen_ppp_options(comment)
    # Create chap secrets.
    self._gen_chap_secrets()
    # Create pap secrets.
    self._gen_pap_secrets()
    # Update nameservers.
    self._update_nameservers_file()
def as_node_input(self, name, node):
    """Register *node* as a named cached input and delegate upward.

    Raises ValueError when the node lacks 'get' or when an input with
    the same name already points at a different source.  The inputs
    container is stopped and restarted around insertion when running.
    """
    if not node.has_method("get"):
        # The above if statement fails when nodes don't resolve.
        raise ValueError("node has no 'get' method: %s" % node)
    source = as_node_url(node)
    if self.inputs.has_child(name):
        node = self.inputs.get_child(name)
        if source != node.source:
            raise ValueError("Input with name exists: %r" % name)
        msglog.warn("Input exists: %r, %r" % (name, source))
    else:
        running = self.inputs.is_running()
        if running:
            self.inputs.stop()
        node = CachedPoint()
        node.configure({"name": name,
                        "source": source,
                        "parent": self.inputs})
        if running:
            self.inputs.start()
    return super(CachingCalculator, self).as_node_input(name, node)
def _setup_options_file(self):
    """Generate the outbound ppp configuration for this connection.

    Writes the port-specific chat script and ppp options, regenerates
    the CHAP/PAP secrets files, and refreshes the nameserver list.
    """
    comment = '# Script generated by the broadway framework\n' \
              '# Date: %s for %s\n' % (str(time.ctime(time.time())),
                                       as_node_url(self))
    # An unused local read of properties.MPXINIT_CONF_FILE was removed.
    # Create CHAT file specific to this port.
    self._gen_chat_script(comment)
    # Create ppp options specific to this port.
    self._gen_ppp_options(comment)
    # Create chap secrets.
    self._gen_chap_secrets()
    # Create pap secrets.
    self._gen_pap_secrets()
    # Update nameservers.
    self._update_nameservers_file()
def remove_target(self, target):
    """Remove *target* (node or URL) from this trigger's target set.

    Returns True on removal, False when the target was not present.
    """
    if isinstance(target, str):
        targeturl = target
    else:
        targeturl = as_node_url(target)
    self.synclock.acquire()
    try:
        try:
            self.targets.remove(targeturl)
        except KeyError:
            removed = False
            message = "Target %s not removed from %s: does not exist."
            msglog.warn(message % (target, self))
        else:
            # Drop any resolved-node mapping too; it may not exist.
            self.targetmap.pop(targeturl, None)
            removed = True
            msglog.inform("Target %s removed from %s." % (target, self))
    finally:
        self.synclock.release()
    return removed
def configure(self, config):
    # Configure a log column whose value comes from evaluating a
    # user-supplied function expression.
    Column.configure(self, config)
    set_attribute(self, 'context', 'None', config, str)
    set_attribute(self, 'function', REQUIRED, config)
    set_attribute(self, 'use_subscription_manager', 1, config, int)
    ##
    # @fixme HACK to work around too much voodoo to fix right now.
    self.__function_attribute = self.function
    set_attribute(self, 'conversion', as_magnitude, config, _function)
    self.original_function = self.function
    if type(self.function) == types.StringType:
        # Rewrite 'self.' references so the expression resolves this
        # node when later evaluated in the restricted environment.
        self.function = string.replace(
            self.function, 'self.',
            'as_internal_node("%s").' % as_node_url(self))
    set_attribute(self, 'args', '()', config)
    # fix for bad configuration
    if self.args == '':
        self.args = '()'
    self.__function_config = self.function
    self._last_time = None
    self._last_value = None
    # Sampling period comes from the owning log (two levels up).
    self._period = self.parent.parent.period
def start(self):
    """Build the WS Response subtree and alert attribute nodes.

    Creates (or re-acquires) the 'WS Response' composite with its XML
    and SOAP data children, then ensures 'Start time'/'End time'
    attribute nodes point at the emralert element before delegating to
    the superclass start.
    """
    if self.has_child('WS Response'):
        self.wsresponse = self.get_child('WS Response')
        self.xmlnodes = self.wsresponse.get_child('XML Nodes')
        self.soapnodes = self.wsresponse.get_child('SOAP Nodes')
    else:
        self.wsresponse = CompositeNode()
        self.wsresponse.configure({'parent': self, 'name': 'WS Response'})
        self.xmlnodes = xmldata.XMLDataNode()
        self.xmlnodes.configure({'name': 'XML Nodes',
                                 'parent': self.wsresponse})
        self.soapnodes = soapdata.SOAPDataNode()
        self.soapnodes.configure({'name': 'SOAP Nodes',
                                  'parent': self.wsresponse})
    if not self.has_child('Start time'):
        nodepath = path.join(as_node_url(self.soapnodes),
                             'GetAlertsResult/disp/emralert')
        self.startnode = AttributeNode()
        self.startnode.configure({'name': 'Start time',
                                  'nodeurl': nodepath,
                                  'attribute': 'start',
                                  'parent': self})
        self.endnode = AttributeNode()
        self.endnode.configure({'name': 'End time',
                                'nodeurl': nodepath,
                                'attribute': 'end',
                                'parent': self})
    self.setup()
    super(CNEDispatch, self).start()
def as_node_input(self, name, node):
    """Register *node* as a named cached input and delegate upward.

    Raises ValueError when the node lacks 'get' or when an input with
    the same name already points at a different source.  The inputs
    container is stopped and restarted around insertion when running.
    """
    if not node.has_method("get"):
        # The above if statement fails when nodes don't resolve.
        raise ValueError("node has no 'get' method: %s" % node)
    source = as_node_url(node)
    if self.inputs.has_child(name):
        node = self.inputs.get_child(name)
        if source != node.source:
            raise ValueError("Input with name exists: %r" % name)
        msglog.warn("Input exists: %r, %r" % (name, source))
    else:
        running = self.inputs.is_running()
        if running:
            self.inputs.stop()
        node = CachedPoint()
        node.configure({"name": name,
                        "source": source,
                        "parent": self.inputs})
        if running:
            self.inputs.start()
    return super(CachingCalculator, self).as_node_input(name, node)
def configure(self, config):
    """Configure the two-state driver: texts, polarity, timing, state.

    The output node defaults to this node's parent when not
    explicitly configured.
    """
    set_attribute(self, 'debug', self.debug, config, int)
    self._init_debug()
    self.DEBUG3("configure(%r):\n", config)
    # @fixme Add the option to output on change (default), output every
    # time, or the check the outputs state.
    ConfigurableNode.configure(self, config)
    self._url = as_node_url(self)
    if self.output is REQUIRED:
        self.output = self.parent
    set_attribute(self, 'off_text', self.off_text, config, str)
    set_attribute(self, 'on_text', self.on_text, config, str)
    set_attribute(self, 'auto_text', self.auto_text, config, str)
    set_attribute(self, 'reverse_output', self.reverse_output, config, int)
    set_attribute(self, 'output', self.parent, config,
                  self.as_deferred_node)
    set_attribute(self, 'input', REQUIRED, config, self.as_deferred_node)
    set_attribute(self, 'period', self.period, config, float)
    set_attribute(self, 'asyncOK', self.asyncOK, config, int)
    set_attribute(self, 'state', self.state, config, self._set_state)
    set_attribute(self, 'enabled', 1, config, as_boolean)
    set_attribute(self, 'min_on_time', 0, config, float)
    set_attribute(self, 'min_off_time', 0, config, float)
    return
def _configure_webdev_ftp(self, enable_webdev_user):
    """Sync this node's FTP requirement into the inittab.

    Adds or removes this node's URL from the MPX_FTPD REQUIRED_BY
    list and enables/disables the ftpd daemon so it runs exactly when
    at least one node still requires it.  The inittab is committed
    only when something actually changed.
    """
    my_node_url = as_node_url(self)
    inittab = InittabManager()
    assert inittab.has_key("MPX_FTPD"), (
        "inittab must contain an MPX_FTPD group, run " +
        "moab/linux/broadway.moab.linux.install from it's local directory.")
    daemon_name = 'ftpd'
    group = inittab["MPX_FTPD"]
    nodes = group.subset("REQUIRED_BY")
    has_my_node = nodes.has_item(my_node_url)
    docommit = 0
    if enable_webdev_user and not has_my_node:
        nodes.add_item(my_node_url)
        docommit = 1
    elif not enable_webdev_user and has_my_node:
        nodes.remove_item(my_node_url)
        docommit = 1
    is_already_enabled = group.is_daemon_enabled(daemon_name)
    if nodes.num_items() > 0:
        # There are nodes requiring MPX_FTPD.  Enable it, if it wasn't
        # already.
        if not is_already_enabled:
            group.enable_daemon(daemon_name)
            docommit = 1
    else:
        # No nodes require MPX_FTPD.  Disable it, if it wasn't already.
        if is_already_enabled:
            group.disable_daemon(daemon_name)
            docommit = 1
    if docommit:
        inittab.commit()
    return