class FileChild(NodeLike):
    """Node wrapper around a single file owned by a file manager."""
    implements(IConfigurableNode)
    security = SecurityInformation.from_default()
    security.protect_get('openread', 'View')
    security.protect_get('openwrite', 'Configure')
    security.disallow_set('name')
    security.disallow_set('parent_path')
    security.disallow_set('url')
    secured_by(security)
    def __init__(self, manager, path, name):
        self.manager = manager
        self.parent_path = path
        self.name = name
        # Absolute path is computed once via the manager's path helper.
        self._path = manager.makepath(path, name)
    def openread(self, mode='rb'):
        """Open the underlying file read-only; writable modes are rejected."""
        if ('w' in mode) or ('+' in mode):
            raise ValueError('Cannot pass "w" or "+" to openread.')
        return self.manager._filesystem.open(self._path, mode)
    def openwrite(self, mode='wb'):
        """Open the underlying file for writing."""
        return self.manager._filesystem.open(self._path, mode)
    def prune(self):
        """Remove the underlying file from the manager's filesystem."""
        return self.manager._filesystem.unlink(self._path)
class PropertyContainer(CompositeNode, ProxyMixin):
    """Composite node holding typed property children behind a proxy."""
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self):
        CompositeNode.__init__(self)
        ProxyMixin.__init__(self)
    def configure(self, cd):
        CompositeNode.configure(self, cd)
        set_attribute(self, 'ptype', REQUIRED, cd)
    def configuration(self):
        cd = CompositeNode.configuration(self)
        get_attribute(self, 'ptype', cd)
        return cd
    security.protect('get_properties', 'View')
    def get_properties(self, filters=()):
        """
        Get list of properties that have meta data matching any
        of the values in the provided name/value pairs.
        """
        matched = []
        pending = dict(filters).items()
        candidates = set(self.children_nodes())
        # Each pass consumes one filter; a child matching any filter is
        # collected once and never reconsidered.
        while candidates and pending:
            metaname, wanted = pending.pop()
            for candidate in list(candidates):
                if candidate.get_meta_values(metaname, set()).intersection(wanted):
                    matched.append(candidate)
                    candidates.remove(candidate)
        return matched
class Level(ConfigurableNode):
    """One slot of a priority array; the node name is the level number."""
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self):
        ConfigurableNode.__init__(self)
        self.__device = None
    def set(self, value):
        # Delegate to the owning device's override at this node's level.
        self.device.override(value, self.name)
    def get(self, skipCache=0):
        # skipCache is accepted for interface compatibility; not used here.
        return self.device.get_override_at(self.name)
    def _get_device(self):
        # Lazily resolve and cache the owning device (the grandparent node),
        # validating its interface once on first resolution.
        if self.__device is None:
            self.__device = self.parent.parent
            assert hasattr(self.__device, 'override'), (
                'Device must support the override method before associating '
                'priority arrays.'
            )
        return self.__device
    device = property(_get_device)
class Alarm(NeoAlarm):
    """
    This extension is provided to replace the new Alarm class
    with a version having reverse-compatibility hooks for PHWin's
    existing framework interaction: a boolean set/get facade over
    trigger/clear, with the in-alarm state tracked by an Event.
    """
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self, *args):
        self.__in_alarm = Event()
        super(Alarm, self).__init__(*args)
    security.protect('trigger', 'Override')
    def trigger(self, *args, **kw):
        # Record the in-alarm state, then defer to the real implementation.
        self.__in_alarm.set()
        return super(Alarm, self).trigger(*args, **kw)
    security.protect('clear', 'Override')
    def clear(self, *args, **kw):
        self.__in_alarm.clear()
        return super(Alarm, self).clear(*args, **kw)
    security.protect('set', 'Override')
    def set(self, value, *args):
        # Truthy value raises the alarm; falsy value clears it.
        source = 'Alarm "set" adapter(PhWin)'
        timestamp = time.time()
        context = 'Invocation: "set(%s)"' % (value,)
        action = self.trigger if value else self.clear
        action(source, timestamp, context)
    security.protect('get', 'View')
    def get(self, *args):
        return self.__in_alarm.isSet()
class NeoAlarm(ConfigurableNode):
    """
    Alarm node that raises and clears alarm events through a Dispatcher and
    keeps a per-alarm backlog of AlarmEvents keyed by GUID, trimmed
    according to per-state retention policies (raised/accepted/cleared).
    """
    implements(IAlarm)
    security = SecurityInformation.from_default()
    secured_by(security)
    security.make_private('events')
    def __init__(self, *args):
        # Backlog of live AlarmEvents keyed by event GUID (see dispatch()).
        self.events = {}
        # Retention limits per state; "" means "inherit from parent" (the
        # get_*_policy methods fall back to self.parent when not an int).
        self.max_raised = ""
        self.max_cleared = ""
        self.max_accepted = ""
        #CSCtf98046:changing default priority to P1
        self.priority = "P1"
        self.description = ""
        self.source = 'broadway'
        self.dispatcher = Dispatcher()
        super(NeoAlarm, self).__init__(*args)
    def configure(self,config):
        """Apply configuration: source, priority, retention limits, description."""
        super(NeoAlarm, self).configure(config)
        self.setattr('source', config.get('source', self.source))
        priority = config.get("priority", self.priority)
        #CSCtf98046-changing all the blank priorities to P1. PH alarms have blank priority.
        if priority == "":
            priority = "P1"
        self.setattr('priority', priority)
        if "max_raised" in config:
            policy = config["max_raised"]
            # Empty values are skipped, leaving the inherited default.
            if policy:
                try:
                    policy = int(policy)
                except ValueError:
                    raise ValueError('Value of field \'Max raised\' is not numeric')
                self.max_raised = policy
        if "max_cleared" in config:
            policy = config["max_cleared"]
            if policy:
                try:
                    policy = int(policy)
                except ValueError:
                    raise ValueError('Value of field \'Max cleared\' is not numeric')
                self.max_cleared = policy
        if "max_accepted" in config:
            policy = config["max_accepted"]
            if policy:
                try:
                    policy = int(policy)
                except ValueError:
                    raise ValueError('Value of field \'Max accepted\' is not numeric')
                self.max_accepted = policy
        description = config.get("description", self.description)
        self.setattr('description', description)
    def configuration(self):
        """Return the current configuration as a dictionary."""
        config = super(NeoAlarm, self).configuration()
        config['source'] = self.getattr('source')
        config['priority'] = self.getattr('priority')
        config["max_raised"] = self.getattr("max_raised", str)
        config["max_cleared"] = self.getattr("max_cleared", str)
        config["max_accepted"] = self.getattr("max_accepted", str)
        config['description'] = self.getattr('description')
        return config
    security.protect('trigger', 'Override')
    def trigger(self, source, timestamp, context, information='', *args,
                **keywords):
        """Raise this alarm by dispatching an AlarmTriggered event."""
        # NOTE(review): alarm_event is never used afterwards; presumably
        # kept for AlarmEvent construction side effects -- confirm before
        # removing.
        alarm_event = AlarmEvent(self)
        kwargs = {"message": information}
        triggered = AlarmTriggered(self, timestamp, source, context, **kwargs)
        self.dispatcher.dispatch(triggered)
    security.protect('clear', 'Override')
    def get_raised_policy(self):
        # A local int limit wins; otherwise inherit the parent's policy.
        if isinstance(self.max_raised, int):
            policy = self.max_raised
        else:
            policy = self.parent.max_raised
        return policy
    def get_cleared_policy(self):
        if isinstance(self.max_cleared, int):
            policy = self.max_cleared
        else:
            policy = self.parent.max_cleared
        return policy
    def get_accepted_policy(self):
        if isinstance(self.max_accepted, int):
            policy = self.max_accepted
        else:
            policy = self.parent.max_accepted
        return policy
    def clear(self, source, timestamp, context, information='', *args,
              **keywords):
        """Clear this alarm by dispatching an AlarmCleared event."""
        cleared = AlarmCleared(self, timestamp, source, context)
        self.dispatcher.dispatch(cleared)
    def prune(self, *args, **kw):
        """Terminate all outstanding events, then remove this node."""
        events = self.get_events()
        self._terminate_events(events, "source alarm removed")
        return super(NeoAlarm, self).prune(*args, **kw)
    def _trim_backlog(self, state, limit):
        # Keep the `limit` newest events for `state`; terminate the rest.
        events = self.events_by_state(state)
        sortable = [(event.created(), event) for event in events]
        events = [event for ts,event in reversed(sorted(sortable))][limit:]
        return self._terminate_events(events, "trim %r event backlog" % state)
    def _terminate_events(self, events, reason):
        # Best-effort termination: failures are logged, never propagated.
        for event in events:
            try:
                event.terminate("Alarm %r" % self.name, reason)
            except:
                # NOTE(review): `message` is assigned but the literal is used
                # directly in the warn call below -- confirm before cleanup.
                message = "Failed to terminate: %s."
                msglog.warn("Failed to terminate: %s." % event)
                msglog.exception(prefix="handled")
            else:
                message = "Alarm %r terminated event %s: %s."
                msglog.inform(message % (self.name, event, reason))
        return events
    def get_event(self, id):
        return self.events[id]
    # Trick to make Framework's as_node work with AlarmEvents.
    get_child = get_event
    def get_events(self):
        return self.events.values()
    def get_events_dictionary(self):
        """Return events grouped into state buckets (all keys always present)."""
        states = {"raised": [], "accepted": [], "cleared": [], "closed": []}
        for event in self.get_events():
            states[event.state.lower()].append(event)
        return states
    def get_event_count(self):
        return len(self.get_events())
    def get_event_counts(self):
        """Return {state: count} over the current backlog."""
        states = self.get_events_dictionary()
        return dict([(st, len(evs)) for st,evs in states.items()])
    def events_by_state(self, state, negate=False):
        """Return events whose state equals `state` (or differs, if negate)."""
        state = state.upper()
        events = self.get_events()
        if negate:
            events = [event for event in events if event.state != state]
        else:
            events = [event for event in events if event.state == state]
        return events
    def get_raised(self):
        return self.events_by_state('raised')
    def get_accepted(self):
        return self.events_by_state('accepted')
    def get_cleared(self):
        return self.events_by_state('cleared')
    def get_closed(self):
        return self.events_by_state('closed')
    def get_not_raised(self):
        return self.events_by_state('raised', True)
    def get_not_accepted(self):
        return self.events_by_state('accepted', True)
    def get_not_cleared(self):
        return self.events_by_state('cleared', True)
    def get_not_closed(self):
        return self.events_by_state('closed', True)
    def dispatch(self, event):
        """
        Forward an alarm-lifecycle event locally and to the parent, then
        enforce the backlog retention policy for the event's state.
        """
        if isinstance(event, AlarmEventRaised):
            # New event: register it and announce the event source first.
            self.events[event.source.GUID] = event.source
            self.dispatcher.dispatch(event.source)
            self.parent.dispatch(event.source)
        self.dispatcher.dispatch(event)
        result = self.parent.dispatch(event)
        if isinstance(event, AlarmEventRaised):
            policy = self.get_raised_policy()
            if policy > 0:
                self._trim_backlog("raised", policy)
        elif isinstance(event, AlarmEventAccepted):
            policy = self.get_accepted_policy()
            if policy > 0:
                self._trim_backlog("accepted", policy)
        elif isinstance(event, AlarmEventCleared):
            policy = self.get_cleared_policy()
            if policy > 0:
                self._trim_backlog("cleared", policy)
        elif isinstance(event, AlarmEventClosed):
            # Closed events leave the backlog entirely.
            del(self.events[event.source.GUID])
        return result
    def __str__(self):
        typename = type(self).__name__
        return "%s(%r)" % (typename, self.as_node_url())
    def __repr__(self):
        return "<%s at %#x>" % (self, id(self))
class PolicyManager(SecurityService):
    """
    Service owning security Policy nodes: lazily creates default policies
    on first use and ranks policies against a given security context.
    """
    implements(IPolicyManager)
    security = SecurityInformation.from_default()
    secured_by(security)
    security.protect('add_child', 'Manage Users')
    security.protect('remove_child', 'Manage Users')
    security.protect('rename_child', 'Manage Users')
    def __init__(self, *args):
        self.__default = None
        # Lazy-start flag: accessors below call start() on first use.
        self.__started = False
        super(PolicyManager, self).__init__(*args)
    def __get_default(self):
        if not self.__started:
            self.start()
        return self.__default
    default = property(__get_default)
    def get_permissions():
        # Intentionally takes no self; rebound as a staticmethod below.
        return security.permissions[:]
    get_permissions = staticmethod(get_permissions)
    def get_policies(self):
        if not self.__started:
            self.start()
        return self.children_nodes()
    def get_policy(self, name):
        if not self.__started:
            self.start()
        return self.get_child(name)
    def has_policy(self, name):
        if not self.__started:
            self.start()
        return self.has_child(name)
    # to be used only by UI to determine if the
    # logged in user should be allowed to view
    # all the nodes or not based on the permissions.
    security.protect('is_manage_users_capable', 'Manage Users')
    def is_manage_users_capable(self):
        return True
    def get_context_policies(self, context, ascending=True):
        """
        Return policies applicable to `context`, ordered by match rank,
        stopping at the first non-acquiring policy in the ranked chain.
        """
        if not self.__started:
            self.start()
        if not isinstance(context, str):
            context = ISecurityContext(context).url
        active = []
        children = self.children_nodes()
        ranked = []
        for child in children:
            try:
                ranked.append((child.rank_match(context), child))
            except ENotRunning:
                # Only log once for consecutive failures.
                if (child.__ENotRunning_logged % 1000) == 0:
                    msglog.log('broadway', msglog.types.WARN,
                               'Policy "%s" not running.' % child.name)
                child.__ENotRunning_logged += 1
            else:
                child.__ENotRunning_logged = 0
        ranked.sort()
        ranked.reverse()
        for rank, child in ranked:
            if rank:
                active.insert(0, child)
            else:
                # Rank zero means no match; the remainder rank no higher.
                break
            if not child.acquires:
                # A non-acquiring policy masks everything ranked below it.
                break
        if not ascending:
            active.reverse()
        return active
    def start(self):
        """Create default policies (once) and start the service."""
        if not self.__started:
            self.__started = True
            default = self.__create_default('Default', '/',
                                            ['name', 'context'])
            if self.role_manager.authenticated is not None:
                default.set_permissions(self.role_manager.authenticated,
                                        'View')
            if self.role_manager.manager is not None:
                default.set_permissions(self.role_manager.manager,
                                        ['View', 'Configure', 'Override'])
            # Callback reference to static 'get_permissions' method means
            # queries for assigned permissions of this role will return
            # all defined permissions available.
            if self.role_manager.administrator is not None:
                default.set_permissions(self.role_manager.administrator,
                                        self.get_permissions)
            self.__default = default
            http = self.__create_default(
                'HTTP Files',
                '/services/network/http_server/http_file_handler', ['name'])
            https = self.__create_default(
                'HTTPS Files',
                '/services/network/https_server/https_file_handler', ['name'])
        super(PolicyManager, self).start()
    def add_child(self, child):
        result = super(PolicyManager, self).add_child(child)
        # Initialize the consecutive-failure counter used by
        # get_context_policies.
        child.__ENotRunning_logged = 0
        return result
    def __create_default(self, policyname, context, readonly=()):
        """Return the named default policy, creating it when missing."""
        if self.has_child(policyname):
            policy = self.get_child(policyname)
        else:
            policy = self.nodespace.create_node(Policy)
            msglog.log(
                'broadway', msglog.types.INFO,
                'Policy Manager creating default policy "%s".' % policyname)
            config = {
                'parent': self,
                'name': policyname,
                'context': context,
                'is_default': True
            }
            policy.configure(config)
        policy.readonly = list(readonly)
        return policy
class GlobalSetpointManager(CompositeNode):
    """
    Service facade for Global Setpoint Groups: restores persisted groups on
    start and delegates per-group operations (configuration, entity mapping,
    push/poll of values) to the GlobalSetpointGroup owned by each entity.
    """
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self):
        # Lazily-resolved reference to the Query Manager service (see qm).
        self._qm = None
        super(GlobalSetpointManager, self).__init__()
    def start(self):
        """Restore persisted setpoint groups, then start children."""
        # Persistence is disabled so restored nodes do not re-persist.
        PERSISTANCE_MANAGER.disable_persist()
        count = 0
        start = time.time()
        for group_path in PERSISTANCE_MANAGER.get_gsp_groups():
            try:
                deserialize_node(PERSISTANCE_MANAGER.get_gsp_group(group_path))
                count += 1
            except:
                msglog.exception()
        PERSISTANCE_MANAGER.enable_persist()
        message = 'Global Setpoint Manager restored %d nodes in %f seconds.' % \
                  (count, time.time() - start)
        msglog.log('Global Setpoint Manager', msglog.types.INFO, message)
        super(GlobalSetpointManager, self).start()
    security.protect('get_point_purposes', 'View')
    def get_point_purposes(self):
        """Return the registered 'purpose' meta values."""
        return META_REGISTRY.get_registered_meta_values('purpose')
    security.protect('discover_by_name', 'View')
    def discover_by_name(self, entity_path, name='*'):
        """Query for properties named `name` under the entity, sorted."""
        if not entity_path.startswith(EM):
            entity_path = EM + entity_path
        matches = self.qm.fetch({
            'query': {
                'name': name,
                'context': entity_path
            }
        }).get('items')
        matches.sort()
        return matches
    security.protect('discover_by_type', 'View')
    def discover_by_type(self, entity_path, purposes):
        """
        Query for properties matching any of `purposes` and group the
        results as {entity_path: {purpose: [[type, name], ...]}}.
        """
        if not entity_path.startswith(EM):
            entity_path = EM + entity_path
        matches = self.qm.fetch({
            "query": {
                "name": "*"
            },
            "properties": {
                "purpose": purposes
            }
        }).get('items')
        result = {}
        for property_url in matches:
            try:
                prop_ref = as_node(property_url)
                entity_path = as_entity_url(prop_ref.entity)
                if not result.has_key(entity_path):
                    result[entity_path] = {}
                prop_id = [prop_ref.type, prop_ref.name]
                for purpose in prop_ref.get_meta_values('purpose'):
                    if purpose in purposes:
                        if result[entity_path].has_key(purpose):
                            result[entity_path][purpose].append(prop_id)
                        else:
                            result[entity_path][purpose] = [prop_id]
            except:
                message = 'Error adding value to discover_by_type result set'
                msglog.log('Global Setpoint Manager', msglog.types.INFO,
                           message)
                msglog.exception()
        return result
    ##
    # Return a list of setpoint groups that are associated with an entity.
    #
    # @param entity_path The path to the entity.
    # @return A list of group names.
    security.protect('get_groups_names', 'View')
    def get_groups_names(self, entity_path):
        return [grp.name for grp in self.get_group_instances(entity_path)]
    security.protect('get_groups_paths', 'View')
    def get_groups_paths(self, entity_path):
        return [
            grp.as_node_url() for grp in self.get_group_instances(entity_path)
        ]
    def get_group_instances(self, entity_path):
        # All GlobalSetpointGroup children of the entity's group container.
        group_container = self._get_group_container(entity_path)
        if group_container is None:
            group_instances = []
        else:
            group_instances = [grp for grp in group_container.children_nodes() if \
                               isinstance(grp, GlobalSetpointGroup)]
        return group_instances
    def get_group_instance(self, entity_path, group_name):
        # Returns None when the entity has no container or no such group.
        group = None
        group_container = self._get_group_container(entity_path)
        if group_container and group_container.has_child(group_name):
            group = group_container.get_child(group_name)
        return group
    def _get_group_container(self, entity_path):
        # Locate the entity's GlobalSetpointGroupContainer child, if any.
        group_container = None
        if not entity_path.startswith(EM):
            entity_path = EM + entity_path
        entity = as_node(entity_path)
        for child in entity.children_nodes():
            if isinstance(child, GlobalSetpointGroupContainer):
                group_container = child
                break
        return group_container
    ##
    # Save or update the group information associated with the referenced entity
    # with the specified configuration. If the group does not exist, then a new
    # group is created.
    #
    # @param entity_path The path to the entity.
    # @param group_name The name of the group.
    # @param config The group configuration data.
    # @return A dictionary providing information about the group configuration.
    security.protect('update_group', 'Configure')
    def update_group(self, entity_path, group_name, config):
        group = self.get_group_instance(entity_path, group_name)
        if not group:
            group = self._create_group(entity_path, group_name, config)
        group.update_group_config(config)
        return group.get_group_config()
    def _create_group(self, entity_path, group_name, config):
        # Create the group (and its container, when missing) under the entity.
        group_container = self._get_group_container(entity_path)
        if group_container is None:
            if not entity_path.startswith(EM):
                entity_path = EM + entity_path
            group_container = GlobalSetpointGroupContainer()
            config = {'name': 'Global Setpoints', 'parent': entity_path}
            group_container.configure(config)
        elif group_container.has_child(group_name):
            raise ENameInUse()
        group = GlobalSetpointGroup()
        config = {
            'name': group_name,
            'parent': group_container,
            'entity_path': entity_path
        }
        group.configure(config)
        group.start()
        return group
    ##
    # Remove a group at the specified entity path. If the path points to a
    # non-existent group, then no error is raised and the function returns
    # success.
    #
    # @param entity_path The path to the entity.
    # @param group_name The name of the group.
    # @return None
    security.protect('remove_group', 'Configure')
    def remove_group(self, entity_path, group_name):
        group = self.get_group_instance(entity_path, group_name)
        if group:
            PERSISTANCE_MANAGER.remove_gsp_group(group.as_node_url())
            group.prune()
    ##
    # Move the group at the source path to the destination path.
    #
    # @param entity_path The path to the entity.
    # @param group_name The name of the group.
    # @return None
    security.protect('move_group', 'Configure')
    def move_group(self, entity_path_src, entity_path_dst, group_name):
        # NOTE(review): not implemented; intentionally a no-op.
        pass
    ##
    # Get the configuration information of the group at the specified entity
    # path
    #
    # @param entity_path The path to the entity.
    # @param group_name The name of the group.
    # @return A dictionary providing information about the group configuration.
    security.protect('get_group', 'View')
    def get_group(self, entity_path, group_name):
        group = self.get_group_instance(entity_path, group_name)
        if not group:
            raise ENoSuchName()
        return group.get_group_config()
    ## following methods are delegated to an instance of a GlobalSetpointGroup
    def update_group_config(self, entity_path, group_name, config):
        return self.get_group_instance(entity_path,
                                       group_name).update_group_config(config)
    def get_group_config(self, entity_path, group_name):
        return self.get_group_instance(entity_path,
                                       group_name).get_group_config()
    ##
    # Return a list of the entities managed by the group.
    #
    # @param entity_path The entity that is the owner of this setpoint group.
    # @param group_name The name of the group.
    # @return A list of paths to the entities that are managed by this node.
    security.protect('get_entities_paths', 'View')
    def get_entities_paths(self, entity_path, group_name):
        return self.get_group_instance(entity_path,
                                       group_name).get_entities_paths()
    ##
    # Add or update one or more entities to the list of entities managed by this
    # group and configure the mapping between the entities properties and setpoint
    # items.
    #
    # @param entity_path The entity that is the owner of this setpoint group.
    # @param group_name The name of the group.
    # @param entity_map A list of Entity Mapping's.
    # @return A list of entity mappings that are managed by this node.
    security.protect('update_entity_mapping', 'Configure')
    def update_entity_mapping(self, entity_path, group_name, entity_map):
        return self.get_group_instance(
            entity_path, group_name).update_entity_mapping(entity_map)
    ##
    # Retrieve the current entity map associated with this global setpoint group
    #
    # @return A list of entity mappings that are managed by this node.
    security.protect('get_entity_mapping', 'View')
    def get_entity_mapping(self, entity_path, group_name):
        return self.get_group_instance(entity_path,
                                       group_name).get_entity_mapping()
    ##
    # Delete a set of entity mappings from the entities managed by this
    # Global Setpoint Group.
    #
    # @param entity_path The entity that is the owner of this setpoint group.
    # @param group_name The name of the group.
    # @param entities The list of Entity Mappings to be removed.
    # @return A list of entity mappings that are managed by this node.
    security.protect('remove_entity_mapping', 'Configure')
    def remove_entity_mapping(self, entity_path, group_name, entities):
        return self.get_group_instance(
            entity_path, group_name).remove_entity_mapping(entities)
    ##
    # Pushes the values of the setpoint items to the properties of the entities
    # mapped to each setpoint item.
    #
    # @param entity_path The entity that is the owner of this setpoint group.
    # @param group_name The name of the group.
    # @return An identifier for the transaction or process id that can be
    # queried later to find out the progress of the process. If a GSG controls
    # a large number of groups, then the call will take a long time to return.
    # This strategy avoids the possible HTTP network timeout that would occur if
    # the framework took too long to set all of the entities. Instead, the client
    # will begin a poll to check for the process.
    security.protect('push_values', 'Override')
    def push_values(self, entity_path, group_name):
        return self.get_group_instance(entity_path, group_name).push_values()
    ##
    # Releases the values of the setpoint items to the properties of the entities
    # mapped to each setpoint item.
    #
    # @param entity_path The entity that is the owner of this setpoint group.
    # @param group_name The name of the group.
    # @return An identifier for the transaction or process id that can be
    # queried later to find out the progress of the process.
    security.protect('release_setpoint', 'Override')
    def release_setpoint(self, entity_path, group_name, setpoint_id,
                         priority_level):
        return self.get_group_instance(entity_path, group_name).release_setpoint(
            setpoint_id, priority_level)
    ##
    # Requests the progress of the push values for the specified transaction_id
    #
    # @param entity_path The entity that is the owner of this setpoint group.
    # @param group_name The name of the group.
    # @param transaction_id An array of entity mappings.
    # @return A transaction status object containing the following attributes.
    # - completed: True if the process has completed, False otherwise.
    # - success: Present only if "completed" is set to True. True if the
    #   process managed to set all the properties, False otherwise.
    # - report_items: Present only if completed is set to True. An array of
    #   Error Report Items.
    security.protect('get_push_values_progress', 'View')
    def get_push_values_progress(self, entity_path, group_name, transaction_id):
        return self.get_group_instance(
            entity_path, group_name).get_push_values_progress(transaction_id)
    ##
    # Creates a poll group for the values of the properties of the managed entities
    # in the setpoint group.
    #
    # @param entity_path The entity that is the owner of this setpoint group.
    # @param group_name The name of the group.
    # @return A string value representing the identifier of the poll group.
    security.protect('create_polled', 'View')
    def create_polled(self, entity_path, group_name,
                      node_reference_table=None, timeout=300):
        # NOTE(review): node_reference_table and timeout are accepted but
        # not forwarded to the group's create_polled() -- confirm intended.
        return self.get_group_instance(entity_path, group_name).create_polled()
    ##
    # Destroys a poll group
    #
    # @param poll_id A string value representing the identifier of the poll group.
    # @return None
    security.protect('destroy', 'View')
    def destroy(self, entity_path, group_name, poll_id):
        return self.get_group_instance(entity_path, group_name).destroy(poll_id)
    ##
    # Polls for the values of the entities managed by the setpoint group given the
    # poll_id
    #
    # @param entity_path The entity that is the owner of this setpoint group.
    # @param group_name The name of the group.
    # @param poll_id The string value representing the identifier of the
    # poll group, as returned from create_polled.
    # @return A dictionary keyed by "(entity_path, property)", the value being a
    # result object.
    security.protect('poll_all', 'View')
    def poll_all(self, entity_path, group_name, poll_id):
        return self.get_group_instance(entity_path, group_name).poll_all(poll_id)
    ##
    # Like poll_all but returns only those entity/property pairs whose values
    # have changed since the last time poll_* was called, referencing a particular
    # poll_id
    #
    # @param entity_path The entity that is the owner of this setpoint group.
    # @param group_name The name of the group.
    # @param poll_id The string value representing the identifier of the
    # poll group, as returned from create_polled.
    # @return A dictionary keyed by "(entity_path, property)", the value being a
    # result object.
    security.protect('poll_changed', 'View')
    def poll_changed(self, entity_path, group_name, poll_id):
        return self.get_group_instance(entity_path,
                                       group_name).poll_changed(poll_id)
    def singleton_unload_hook(self):
        pass
    def _get_qm(self):
        # Resolve and cache the Query Manager service on first access.
        if self._qm is None:
            self._qm = as_node('/services/Query Manager')
        return self._qm
    qm = property(_get_qm)
class OverrideMixin(object):
    """
    Mixin implementing a 16-level priority-array override scheme: writes at
    numbered levels (1 = highest priority) mask lower-priority writes, and
    releasing all levels falls back to the relinquish default (slot '17').

    The effective value is forwarded to self._set_target via set_proxy()
    when available, otherwise via set().
    """
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self, *args, **kw):
        # Backing array is attached lazily by restore_override().
        self.__priority_array = None
        self.__relinquish_default = kw.get('relinquish_default', None)
        # Active level is tracked as a string key; '17' is the default slot.
        self.__active = '17'
        # Target receiving effective values; subclasses may repoint this.
        self._set_target = self
    def restore_override(self):
        """Attach the persisted priority array and (re)build child levels."""
        self.__priority_array = OVERRIDE_MANAGER.get_array(self.as_node_url())
        self.__update_active()
        current_default = self.get_default()
        cd_default = self.__priority_array[CONFIG_DEFAULT]
        # Bug fix: the original referenced the undefined names 'dflt' and
        # 'dlft' here (a NameError whenever the configured default differed
        # from the stored one); the configured relinquish default is the
        # value being synchronized into the array.
        dflt = self.__relinquish_default
        if dflt != cd_default:
            self.__priority_array[CONFIG_DEFAULT] = dflt
            if dflt != current_default:
                self.__priority_array[RELINQUISH_DEFAULT] = dflt
        if not self.has_child('Priority Array'):
            # Build the 'Priority Array' child with one Level node per slot.
            pa = PriorityArrayNode()
            cd = {'name':'Priority Array', 'parent':self}
            pa.configure(cd)
            for idx in range(1, 16+1):
                level = Level()
                cd = {'name':str(idx), 'parent':pa}
                level.configure(cd)
    security.protect('override', 'Override')
    def override(self, value, level=16):
        """
        Write `value` at `level` (default 16, lowest priority), or apply a
        whole OverrideDict of level->value changes. None (or 'None')
        releases the level instead. If pushing the new effective value to
        the target fails, previously stored values are reverted.
        """
        override_list = []
        pre_ovr_values_list = []
        active_level = self.__get_active()
        active_value = self.__priority_array[active_level]
        if isinstance(value, OverrideDict):
            # Diff against the current override state; only write changes.
            current_ovr = self.get_override()
            for pa_level in range(1, 16+1):
                ovr = value.get_override(pa_level)
                if isinstance(ovr, StringType):
                    try:
                        ovr = float(ovr)
                    except:
                        pass
                if ovr != current_ovr.get_override(pa_level):
                    override_list.append((ovr, pa_level))
        else:
            if isinstance(value, StringType):
                try:
                    value = float(value)
                except:
                    pass
            override_list.append((value, level))
        for value, level in override_list:
            # for historical reasons, override(None, level) has the same
            # behavior as release(level)
            if value is None or value == 'None':
                self.release(level)
                continue
            level = str(level)
            try:
                self.__assert_level(level)
            except:
                # Skip invalid levels rather than failing the whole batch.
                continue
            if self.__priority_array[level] != value:
                # Remember the prior value so a failed push can be reverted.
                pre_ovr_values_list.append((self.__priority_array[level],
                                            level))
                self.__priority_array[level] = value
                OVERRIDE_MANAGER.notify_changed(
                    self.as_node_url(), self.__priority_array
                )
                if int(level) <= int(self.__get_active()):
                    self.__active = level
        new_active_level = self.__get_active()
        new_active_value = self.__priority_array[new_active_level]
        #CSCtn72781
        # NOTE(review): this condition is a tautology (always true), so the
        # latest override is always pushed; kept as-is per CSCtn72781.
        if active_level != new_active_level or active_value != new_active_value or \
           (active_level == new_active_level and active_value == new_active_value):
            # this latest override takes precedence
            if hasattr(self._set_target, 'set_proxy'):
                try:
                    self._set_target.set_proxy(new_active_value)
                except:
                    #Revert values if Device is offline and Override fails
                    for prior_value, level in pre_ovr_values_list:
                        self.__priority_array[level] = prior_value
                    OVERRIDE_MANAGER.notify_changed(self.as_node_url(),
                                                    self.__priority_array)
            else:
                try:
                    self._set_target.set(new_active_value)
                except:
                    #Revert values if Device is offline and Override fails
                    for prior_value, level in pre_ovr_values_list:
                        self.__priority_array[level] = prior_value
                    OVERRIDE_MANAGER.notify_changed(self.as_node_url(),
                                                    self.__priority_array)
    security.protect('release', 'Override')
    def release(self, level):
        """Clear the override at `level` and push the new effective value."""
        self.__assert_level(level)
        level = str(level)
        pa = self.__priority_array
        last_value = pa[level]
        pa[level] = None
        # NOTE(review): other call sites pass the array as a second argument
        # to notify_changed -- confirm whether it should be passed here too.
        OVERRIDE_MANAGER.notify_changed(self.as_node_url())
        self.__update_active()
        active = self.__get_active()
        value = pa[active]
        if value != last_value:
            # Do not push when we fell back to an empty relinquish default.
            if not(active == RELINQUISH_DEFAULT and value == None):
                if hasattr(self._set_target, 'set_proxy'):
                    self._set_target.set_proxy(value)
                else:
                    self._set_target.set(value)
    security.protect('get_override', 'View')
    def get_override(self):
        """Return a copy of the priority array wrapped in an OverrideDict."""
        pa = self.__priority_array.copy()
        default = self.get_default()
        return OverrideDict(pa, default)
    security.protect('get_override_at', 'View')
    def get_override_at(self, level):
        """Return the raw stored value at `level` (None when not set)."""
        return self.__priority_array[str(level)]
    security.protect('set_default', 'Override')
    def set_default(self, value):
        """Set the relinquish default (the value used when all levels are empty)."""
        self.__priority_array[RELINQUISH_DEFAULT] = value
    security.protect('get_default', 'View')
    def get_default(self):
        return self.__priority_array[RELINQUISH_DEFAULT]
    security.protect('get_write_priority', 'View')
    def get_write_priority(self):
        """Return the active level as an int, or None when relinquished."""
        wp = int(self.__get_active())
        if wp == 17:
            wp = None
        return wp
    def __get_active(self):
        return self.__active
    def __assert_level(self, level):
        assert_level(level)
    def __update_active(self):
        # Highest-priority (lowest-numbered) occupied slot wins; fall back
        # to the relinquish default slot when all 16 levels are empty.
        active = RELINQUISH_DEFAULT
        for index in range(1,17):
            index = str(index)
            if self.__priority_array[index] is not None:
                active = index
                break
        self.__active = active
class ScheduleManager(CompositeNode, EventProducerMixin, EventConsumerMixin):
    """Top-level service that owns the hierarchical schedule tree.

    Restores persisted schedules at startup, wraps "legacy" schedules
    (RZ/BACnet/control-service) in delegating nodes, mirrors schedules from
    remote hosts as proxies, and provides the create/remove entry points
    used by the UI.
    """
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args, **kw):
        super(ScheduleManager, self).__init__(*args, **kw)
        EventProducerMixin.__init__(self)
        EventConsumerMixin.__init__(self, self.event_handler)
        self.remotes_loaded = False
        self.__running = False
        self._hm = None
        self._ctlsvc = None
        # handles for pending sys_scheduler callbacks
        self._hm_scheduled = None
        self._ph_scheduled = None
        self._ph_loader_scheduled = None
        self._ph_loaded = False
        self.__last_pruned = None
        self._proxied_manager = _ProxiedScheduleManager(self)
        # URLs of legacy schedules we already delegate to
        self.__legacy_schedules = []
        # delegating nodes whose legacy target disappeared
        self.__legacy_needs_pruning = []
        self.__ph_legacy_needs_pruning = []

    def configure(self, config):
        super(ScheduleManager, self).configure(config)
        # "hidden" configuration option that can be overridden if a user
        # installed schedules in an abnormal place.
        sched_holder = '/services/time/local'
        set_attribute(self, 'sched_holder', sched_holder, config)
        config['dflt_sched_prio'] = config.get('dflt_sched_prio',
                                               scheduler.DFLT_SCHED_PRIO)
        set_attribute(self, 'dflt_sched_prio', REQUIRED, config, int)
        # push the configured default priority back into the scheduler module
        scheduler.DFLT_SCHED_PRIO = self.dflt_sched_prio

    def configuration(self):
        config = super(ScheduleManager, self).configuration()
        get_attribute(self, 'sched_holder', config)
        get_attribute(self, 'dflt_sched_prio', config)
        return config

    def start(self):
        if self.is_running():
            return
        if as_node('/services').has_child('Entity Manager'):
            em = as_node('/services/Entity Manager')
            if not em.is_running():
                # Defer our startup until the Entity Manager has started by
                # chaining our do_start onto its.  See if the function is
                # already wrapped: if it is wrapped, im_self is not an
                # attribute.
                if hasattr(em.do_start, 'im_self'):
                    em.do_start = wrapped(em.do_start, None, self.do_start)
                return
        self.do_start()

    def do_start(self):
        """Restore persisted schedules, then load legacy/remote schedules."""
        self.message('Schedule Manager starting.')
        schedule_ph_prune = False
        scheds = PERSISTANCE_MANAGER.get_scheds()
        proxy_prune_list = []
        for sched in scheds:
            node_info = {}
            try:
                node_info = PERSISTANCE_MANAGER.get_sched_cfg(sched)
                if node_info.get('factory').count(
                        'ProxiedHierarchialScheduler'):
                    host_url = node_info.get('cfg').get('host_url')
                    try:
                        as_node(host_url)
                    except ENoSuchName:
                        # proxy for a host that no longer exists
                        proxy_prune_list.append(sched)
                        continue
                sched_node = create_node(node_info)
                uuid = node_info.get('cfg').get('uuid')
                if not uuid or uuid == 'None':
                    # uuid was added later - below code to deal with
                    # persisting of that property.
                    PERSISTANCE_MANAGER.put_sched(sched_node.as_node_url(),
                                                  serialize_node(sched_node))
                if not isinstance(sched_node, ProxiedHierarchialScheduler):
                    # proxied schedules store locally. Restore summary,
                    # properties and meta for local.
                    url = sched_node.as_node_url()
                    properties = PERSISTANCE_MANAGER.get_sched_props(url)
                    if properties:
                        sched_node.set_properties(properties, save=False)
                    meta = PERSISTANCE_MANAGER.get_sched_meta(url)
                    if meta:
                        sched_node.set_event_meta(meta)
                    if not isinstance(sched_node,
                                      (DelegatedHierarchialScheduler,
                                       CtlSvcDelegatedHierarchialScheduler)):
                        sched_node._set_summary(
                            PERSISTANCE_MANAGER.get_sched_summary(url))
                        sched_node.set_override(
                            PERSISTANCE_MANAGER.get_override(url))
                if isinstance(sched_node, DelegatedHierarchialScheduler):
                    # keep track of the "legacy" schedules we are delegating to
                    delegate = sched_node.configuration().get('delegate')
                    try:
                        # see if the target still exists.
                        as_node(delegate)
                        self.__legacy_schedules.append(delegate)
                    except:
                        # the legacy schedule disappeared on us.
                        # schedule it for removal, iff it doesn't have children
                        if isinstance(sched_node,
                                      CtlSvcDelegatedHierarchialScheduler):
                            schedule_ph_prune = True
                            self.__ph_legacy_needs_pruning.append(sched_node)
                        else:
                            self.__legacy_needs_pruning.append(sched_node)
                elif isinstance(sched_node, ProxiedHierarchialScheduler):
                    host_url = sched_node.host_url
                    uuid = sched_node.configuration().get('uuid')
                    self._proxied_manager.register_persisted(
                        host_url, uuid, sched_node)
                try:
                    sched_node.start()
                except:
                    msglog.exception()
            except:
                msglog.exception()
        # remove persisted proxies whose host is gone, children first
        proxy_prune_list.sort(sched_sort)
        for sched in proxy_prune_list:
            msg = 'Removing schedule %s for non existent host.' % \
                urllib.unquote(sched)
            self.message(msg, level=0)
            PERSISTANCE_MANAGER.remove_sched(sched)
        self._load_schedules()
        self._prune_schedules(self.__legacy_needs_pruning)
        try:
            remote_hosts = self.host_manager.children_nodes()
        except:
            remote_hosts = []
        self.load_remote_hosts(remote_hosts)
        if schedule_ph_prune:
            # there's control service scheduled to care about.
            self._ph_scheduled = sys_scheduler.seconds_from_now_do(
                60, self._prune_legacy_ph_schedules)
        self.__running = True

    def is_running(self):
        return self.__running

    def event_handler(self, event):
        """Handle ScheduleCreatedEvents from the control service ('TIM')."""
        if isinstance(event, ScheduleCreatedEvent) and \
           event.source.name == 'TIM':
            schedule = event.schedule
            for sched_node in self.__ph_legacy_needs_pruning:
                delegate = sched_node.configuration().get('delegate')
                if as_node(delegate) is schedule:
                    # we already have a reference to this schedule. It's now
                    # been created by the control service.
                    self.__ph_legacy_needs_pruning.remove(sched_node)
                    # BUGFIX: 'delegate' was previously an unbound name here
                    # (NameError); record the delegate URL we just resolved.
                    self.__legacy_schedules.append(delegate)
                    return
            # create a new schedule if we don't have a reference already
            if not schedule.as_node_url() in self.__legacy_schedules:
                self._create_legacy_schedule(
                    schedule, CtlSvcDelegatedHierarchialScheduler)

    def _load_schedules(self):
        self._load_legacy_schedules()
        self._load_bacnet_schedules()
        self._load_ctlsvc_schedules()

    def _load_legacy_schedules(self):
        # wrap every non-control-service Schedules group under sched_holder
        sched_holders = []
        try:
            for child in as_node(self.sched_holder).children_nodes():
                if isinstance(child, Schedules) and child.name != 'TIM':
                    sched_holders.append(child)
        except ENoSuchName:
            pass
        for sched_holder in sched_holders:
            self._load_schedule_group(sched_holder,
                                      DelegatedHierarchialScheduler)

    def _load_bacnet_schedules(self):
        sched_holders = []
        try:
            for child in as_node(self.sched_holder).children_nodes():
                if isinstance(child, BacnetSchedules):
                    sched_holders.append(child)
        except ENoSuchName:
            pass
        for sched_holder in sched_holders:
            self._load_schedule_group(sched_holder,
                                      DelegatedHierarchialScheduler)

    def _load_ctlsvc_schedules(self):
        # Keep retrying every 60s until the control service is running.
        if self._ctl_svc_running():
            sched_holder = None
            try:
                sched_holder = as_node(self.sched_holder).get_child('TIM')
            except:
                self._ph_loaded = True
            if sched_holder:
                sched_holder.event_subscribe(self, ScheduleCreatedEvent)
                self._load_schedule_group(
                    sched_holder, CtlSvcDelegatedHierarchialScheduler)
                self._ph_loaded = True
            return
        self._ph_loader_scheduled = sys_scheduler.seconds_from_now_do(
            60, self._load_ctlsvc_schedules)

    def _load_schedule_group(self, sched_holder, klass):
        for sched in sched_holder.children_nodes():
            try:
                self._create_legacy_schedule(sched, klass)
            except:
                msg = 'Error adding RZSchedule %s' % sched.as_node_url()
                self.message(msg)
                msglog.exception()

    def _create_legacy_schedule(self, sched, klass):
        """Create a klass delegator for legacy schedule node `sched`."""
        sched_path = sched.as_node_url()
        if sched_path not in self.__legacy_schedules:
            # don't recreate a delegator that is pending removal
            do_create = True
            for s in self.__legacy_needs_pruning:
                if sched_path == s.configuration().get('delegate'):
                    do_create = False
                    break
            if do_create:
                for s in self.__ph_legacy_needs_pruning:
                    if sched_path == s.configuration().get('delegate'):
                        do_create = False
                        break
            if do_create:
                new_sched = klass()
                name = sched.name.replace(':', '_')
                cd = {
                    'name': name,
                    'parent': self,
                    'delegate': sched.as_node_url()
                }
                new_sched.configure(cd)
                nodepath = new_sched.as_node_url()
                PERSISTANCE_MANAGER.put_sched(nodepath,
                                              serialize_node(new_sched))
                new_sched.set_override(True)
                new_sched.start()
                self.__legacy_schedules.append(sched_path)

    def _prune_schedules(self, prune_list):
        # Drain prune_list, removing delegators whose target is gone and
        # which have no children.
        while 1:
            try:
                sched_node = prune_list.pop()
                delegate = sched_node.configuration().get('delegate')
                if not delegate in self.__legacy_schedules and \
                   not sched_node.children_nodes():
                    # no children, it can be removed
                    msg = 'Removing abandoned legacy schedule %s' % delegate
                    self.message(msg)
                    self.remove_schedule(sched_node, 1)
            except IndexError:
                break

    def _prune_legacy_ph_schedules(self):
        if not self._ph_scheds_loaded():
            # control service not up yet; try again later
            self._ph_scheduled = sys_scheduler.seconds_from_now_do(
                60, self._prune_legacy_ph_schedules)
        else:
            self._prune_schedules(self.__ph_legacy_needs_pruning)

    def _ph_scheds_loaded(self):
        loaded = False
        if self._ctl_svc_running() and self._ph_loaded:
            loaded = True
        return loaded

    def _ctl_svc_running(self):
        # NOTE(review): __init__ initializes self._ctlsvc, but this reads
        # self.ctlsvc -- presumably set elsewhere (e.g. a property or
        # set_attribute); confirm before "fixing" the name.
        running = True
        if not self.ctlsvc or self.ctlsvc.get().lower() != 'running':
            running = False
        return running

    def load_remote_hosts(self, remote_hosts):
        # load in the background; remote hosts can be slow/unreachable
        thread = Thread(name=self.name,
                        target=self._load_remote_hosts,
                        args=(remote_hosts, ))
        thread.start()

    def is_loading(self):
        return not self.remotes_loaded

    def is_loaded(self):
        return self.remotes_loaded

    def _load_remote_hosts(self, remote_hosts):
        failed = []
        for host in remote_hosts:
            try:
                sched_manager = host.as_remote_node(
                    '/services/Schedule Manager')
                self._load_remote_schedules(sched_manager, host)
            except:
                message = 'Unable to load remote schedules from host: %s' % \
                    host.name
                self.message(message)
                msglog.exception()
                failed.append(host)
        self.remotes_loaded = True
        #@fixme - convert to an event based approach, triggered by changes
        #downstream.
        if remote_hosts:
            # re-poll all remote hosts every 5 minutes
            self._hm_scheduled = sys_scheduler.seconds_from_now_do(
                300, self.load_remote_hosts, remote_hosts)

    def _load_remote_schedules(self, sched_manager, host):
        """Mirror the schedules of one remote host as local proxy nodes."""
        hostname = host.name
        host_url = host.as_node_url()
        for sched_name in sched_manager.children_schedule_names():
            name = '[%s] %s' % (hostname, sched_name)
            try:
                schedule = sched_manager.get_child(sched_name)
                uuid = schedule.get_uuid()
                if self._proxied_manager.was_persisted(host_url, uuid):
                    persisted_sched = self._proxied_manager.get_persisted(
                        host_url, uuid)
                    if persisted_sched.name != name:
                        # schedule was renamed remotely: move our proxy
                        from_path = persisted_sched.as_node_url()
                        fsplit = from_path.split('/')
                        fsplit[-1] = name
                        to_path = '/'.join(fsplit)
                        # update the link to the schedule that it proxies.
                        cd = persisted_sched.configuration()
                        cd['proxy'] = schedule
                        persisted_sched.configure(cd)
                        self.move_schedule(from_path, to_path, 1)
                    self._proxied_manager.register_active(
                        host_url, uuid, schedule)
                else:
                    new_schedule = ProxiedHierarchialScheduler()
                    cd = {
                        'name': name,
                        'parent': self,
                        'proxy': schedule,
                        'host_url': host_url,
                        'uuid': uuid
                    }
                    new_schedule.configure(cd)
                    nodepath = new_schedule.as_node_url()
                    PERSISTANCE_MANAGER.put_sched(nodepath,
                                                  serialize_node(new_schedule))
                    new_schedule.start()
                    new_schedule.set_host(host)
                    self._proxied_manager.register_new(host_url, uuid,
                                                       new_schedule)
                    persisted_sched = new_schedule
                persisted_sched.refresh_children_names()
            except:
                msglog.exception()
        # anything persisted but no longer on the host gets pruned
        self._proxied_manager.prune_inactive(host_url)
        self._proxied_manager.clear_active(host_url)

    def message(self, message, mtype=msglog.types.INFO, level=1):
        if self.debug >= level:
            msglog.log('Scheduler', mtype, message)

    ##
    # Create a new schedule
    #
    # @param name The schedules name
    # @param parent The uri of the parent where this schedule should be
    #        attached.
    # @return None
    security.protect('create_schedule', 'Configure')
    def create_schedule(self, name, parent):
        '''create a schedule'''
        parent = self._get_schedule_node(parent)
        schedule = HierarchialScheduler()
        cd = {'name': name, 'parent': parent}
        schedule.configure(cd)
        nodepath = schedule.as_node_url()
        PERSISTANCE_MANAGER.put_sched(nodepath, serialize_node(schedule))
        if parent is not self:
            # do not change override status
            schedule._set_summary(parent.get_summary())
        else:
            schedule.set_override(True)
        schedule.start()
        PERSISTANCE_MANAGER.put_sched_summary(nodepath,
                                              schedule.get_summary())
        self.event_generate(ScheduleCreatedEvent(self, schedule))

    ##
    # Removes a schedule from the scheduling hiearchy
    #
    # @param schedule The uri of the schedule to be removed.
    # @return None
    security.protect('remove_schedule', 'Configure')
    def remove_schedule(self, schedule, force=0):
        if type(schedule) is types.ListType:
            # remove deepest schedules first so children go before parents
            schedule.sort(sched_sort)
            schedule.reverse()
            exceptions = {}
            for sched in schedule:
                try:
                    sched = self._get_schedule_node(sched)
                    self.remove_schedule(sched, force)
                except Exception:
                    exceptions[sched.as_node_url()] = str(current_exception())
            if exceptions:
                raise EBatchedException('remove_schedule', exceptions)
            return
        schedule = self._get_schedule_node(schedule)
        # the order here matters, not force should be checked before
        # get_meta() call. This could be cleaning up b\c remote schedule is
        # gone.
        if not force and schedule.get_meta().get('immutable'):
            err_msg = 'Runtime removal of schedule %s is not supported' % \
                schedule.name
            raise EImmutable(err_msg)
        if not isinstance(schedule, ProxiedHierarchialScheduler):
            for child_sched in schedule.children_schedules():
                # re-parent schedule
                self.move_schedule(child_sched, schedule.parent)
        schedule_url = schedule.as_node_url()
        self.__last_pruned = schedule
        try:
            PERSISTANCE_MANAGER.remove_sched(schedule.as_node_url())
        except:
            msglog.exception()
        schedule.prune()
        self.event_generate(ScheduleRemovedEvent(self, schedule_url))
class OverridableProperty(Property, OverrideMixin):
    """A Property whose value can be overridden at BACnet-style priority
    levels.

    Every override operation is delegated to the remote subject when the
    property is remote; locally it goes through OverrideMixin.  Local
    subjects that turn out to be commandable BACnet objects are promoted to
    BACnetOverrideableProperty at start.
    """
    security = SecurityInformation.from_default()
    secured_by(security)
    implements(IAliasNode)

    def __init__(self):
        Property.__init__(self)
        OverrideMixin.__init__(self)
        # set when start() could not resolve the subject; is_running()
        # retries start() while this is set.
        self._start_failed = False

    def start(self):
        if not self.has_child('_status'):
            status = StatusNode()
            status.configure({'name': '_status', 'parent': self})
        if not self.is_remote():
            try:
                subject = self.get_subject()
            except:
                # we're unable to reference the subject - it could be a
                # commandable bacnet property - until we know for sure,
                # raise ENotRunning
                self._start_failed = True
                return
            if isinstance(subject, BINObjectInstance):
                try:
                    # confirm that it is in fact, commandable
                    subject.get_child('87').get()
                except:
                    self.restore_override()
                else:
                    # promote this instance to the BACnet-specific class
                    self.__class__ = BACnetOverrideableProperty
                    BACnetOverrideableProperty.start(self)
            else:
                self.restore_override()
        self._start_failed = False
        Property.start(self)

    def is_running(self):
        run_status = Property.is_running(self)
        if not run_status:
            if self._start_failed:
                # subject may have become resolvable since last attempt
                self.start()
                run_status = Property.is_running(self)
        return run_status

    def set(self, value):
        # local sets are applied as an override at the lowest priority (16)
        if self.is_remote():
            self.get_subject().set(value)
        else:
            self.override(value, 16)

    def set_proxy(self, value):
        self.get_subject().set(value)

    security.protect('get_override', 'View')
    def get_override(self):
        if self.is_remote():
            return self.get_subject().get_override()
        if not self.is_running():
            raise ENotRunning()
        return OverrideMixin.get_override(self)

    security.protect('override', 'Override')
    def override(self, value, level=16):
        if self.is_remote():
            return self.get_subject().override(value, level)
        if not self.is_running():
            raise ENotRunning()
        return OverrideMixin.override(self, value, level)

    security.protect('release', 'Override')
    def release(self, level):
        if self.is_remote():
            return self.get_subject().release(level)
        if not self.is_running():
            raise ENotRunning()
        return OverrideMixin.release(self, level)

    security.protect('get_override_at', 'View')
    def get_override_at(self, level):
        if self.is_remote():
            return self.get_subject().get_override_at(level)
        if not self.is_running():
            raise ENotRunning()
        return OverrideMixin.get_override_at(self, level)

    security.protect('get_write_priority', 'View')
    def get_write_priority(self):
        if self.is_remote():
            # BUGFIX: previously passed `self` as an argument
            # (get_write_priority(self)), which is a TypeError -- the
            # subject's get_write_priority takes no arguments, matching
            # every other delegating method in this class.
            return self.get_subject().get_write_priority()
        if not self.is_running():
            raise ENotRunning()
        return OverrideMixin.get_write_priority(self)

    security.protect('set_default', 'Override')
    def set_default(self, value):
        if self.is_remote():
            return self.get_subject().set_default(value)
        if not self.is_running():
            raise ENotRunning()
        return OverrideMixin.set_default(self, value)

    security.protect('get_default', 'View')
    def get_default(self):
        if self.is_remote():
            return self.get_subject().get_default()
        return OverrideMixin.get_default(self)

    def _get_override(self):
        if self.is_remote():
            return self.get_subject()._get_override()
        return OverrideMixin.get_override(self)

    def get_subject(self):
        # cache the resolved subject node
        if self._subject is None:
            self._subject = Property.get_subject(self)
        return self._subject
class UserManager(SecurityService): implements(IUserManager) security = SecurityInformation.from_default() secured_by(security) def __init__(self, *args): # This is required for use of older system, remove later... self.__system_lock = threading.Lock() self.__renaming_user = None self.__running = threading.Event() self.__sysadmin = None self.__anonymous = None super(UserManager, self).__init__(*args) def __get_sysadmin(self): return self.__sysadmin sysadmin = property(__get_sysadmin) def __get_anonymous(self): return self.__anonymous anonymous = property(__get_anonymous) def get_users(self): children = self.children_nodes() return children def get_user(self, name): child = self.get_child(name) return child def get_user_names(self): names = self.children_names() return names def has_user(self, username): return self.has_child(username) # to be used only by UI to determine if the # logged in user should be allowed to view # all the nodes or not based on the permissions. security.protect('is_manage_users_capable', 'Manage Users') def is_manage_users_capable(self): return True def user_from_object(self, userobject): if isinstance(userobject, User): username = userobject.name elif isinstance(userobject, OGUserObject): username = userobject.name() if username == 'NoneUser': username = '******' elif userobject is None: username = '******' else: error = 'User object can be type: User, OGUserObject, or None. ' error += 'Not %s.' % type(userobject) raise TypeError(error) try: return self.get_user(username) except ENoSuchName, e: # TODO: Determine if this is the right place. # Put in to handle Linux system user names that do not # have security manager counterparts (nobody,mail,et al). from mpx.service.user_manager import EAuthenticationFailed raise EAuthenticationFailed() raise EUnreachableCode()
class Role(ConfigurableNode):
    # A named security role.  Roles listing attribute names in
    # self.readonly are "built-in": those attributes cannot be
    # reconfigured and the role itself cannot be removed.
    implements(IRole)
    security = SecurityInformation.from_default()
    secured_by(security)
    security.protect_set('name', 'Manage Users')
    security.make_private('readonly')

    def __init__(self, *args):
        # names of attributes that configure() must not change;
        # empty list means the role is fully editable and removable.
        self.readonly = []
        super(Role, self).__init__(*args)

    security.protect('configure', 'Manage Users')
    def configure(self, config):
        """Apply config, silently restoring any readonly attribute that the
        incoming config tries to change (a warning is logged)."""
        for attrname in self.readonly:
            current = getattr(self, attrname, None)
            incoming = config.get(attrname)
            # only veto when both sides have a value and they differ
            if None not in (current, incoming) and (current != incoming):
                message = 'Attribute "%s" is readonly for Role "%s". '
                message += 'Overriding new value %s with current value %s.'
                message = message % (attrname, self.name, incoming, current)
                msglog.log('broadway', msglog.types.WARN, message)
                config[attrname] = current
        # NOTE(review): an absent 'description' key resets the description
        # to '' -- confirm callers always pass it through.
        self.description = config.get('description', '')
        return super(Role, self).configure(config)

    def configuration(self):
        config = super(Role, self).configuration()
        config['description'] = self.description
        return config

    def is_removable(self):
        # removable only when no attribute is marked readonly
        return not len(self.readonly)

    def is_configurable(self):
        #roles are not configurable
        return False

    security.protect('prune', 'Manage Users')
    def prune(self):
        """Remove this role, scrubbing it from every user's role list and
        every policy's rolemap.

        Returns a list of affected PDO kinds (['roles', ...]) when more
        than the role itself changed; otherwise returns the superclass
        prune() result.
        """
        affected = ['roles']
        if not self.is_removable():
            error = '%s "%s" is not removable.'
            raise TypeError(error % (type(self).__name__, self.name))
        # strip this role from any user that holds it
        for user in self.parent.parent.user_manager.get_users():
            if self.name in user.roles:
                temp = user.roles
                temp.remove(self.name)
                user.set_roles(temp)
                if 'users' not in affected:
                    affected.append('users')
        # strip this role from any policy rolemap that references it
        for policy in self.parent.parent.policy_manager.get_policies():
            if self.name in policy.rolemap:
                policy.rolemap.pop(self.name)
                if 'policies' not in affected:
                    affected.append('policies')
        #if role is associated, then more than one pdo must be changed, thus we send a list of items that are changed
        #else we just return as before
        if len(affected) > 1:
            super(Role, self).prune()
            return affected
        else:
            return super(Role, self).prune()
class TriggerManager(CompositeNode): implements(ITriggerManager) security = SecurityInformation.from_default() secured_by(security) def __init__(self, *args): self.dispatcher = None self._queue = None self._stopflag = None self._thread = None CompositeNode.__init__(self, *args) security.protect('get_trigger', 'View') def get_triggers(self): return self.children_nodes() security.protect('get_trigger', 'View') def get_trigger(self, name): return self.get_child(name) security.protect('add_trigger', 'Configure') def add_trigger(self, trigger): return self.add_child(trigger) security.protect('remove_trigger', 'Configure') def remove_trigger(self, trigger): return self.prune_child(trigger) security.protect('get_trigger_names', 'View') def get_trigger_names(self): return self.children_names() security.protect('get_active', 'View') def get_active(self): children = self.children_nodes() active = map(Trigger.is_active, children) return [child for child in children if child.is_active()] security.protect('get_inactive', 'View') def get_inactive(self): children = self.children_nodes() active = map(Trigger.is_active, children) return [child for child in children if not child.is_active()] def start(self): if self._thread is not None: raise Exception('Cannot call start on started ' 'Manager without stopping.') if self.dispatcher is None: self.dispatcher = Dispatcher(self.url) self.triggersub = self.dispatcher.register_for_type( self.handle_triggered, TriggerActivated) self.clearsub = self.dispatcher.register_for_type( self.handle_cleared, TriggerCleared) self._startmanager() return super(TriggerManager, self).start() def stop(self): if self.triggersub: self.dispatcher.unregister(self.triggersub) if self.clearsub: self.dispatcher.unregister(self.clearsub) self.triggersub = self.clearsub = None self._stopmanager() return super(TriggerManager, self).stop() def _add_child(self, child): pass def _rename_child(self, *args): pass def queue_trigger(self, trigger): self._queue.put(trigger) 
def is_running(self): return not not (self._thread and self._thread.isAlive()) security.protect('handle_triggered', 'Override') def handle_triggered(self, event): trigger = event.get_trigger() targets = trigger.get_targets() arguments = event.get_arguments() for target in targets: target.trigger(*arguments) return len(targets) security.protect('handle_cleared', 'Override') def handle_cleared(self, event): trigger = event.get_trigger() targets = trigger.get_targets() arguments = event.get_arguments() for target in targets: target.clear(*arguments) return len(targets) def _startmanager(self): self._queue = queue = Queue() self._stopflag = stopflag = Flag() self._thread = thread = ImmortalThread(name=self.url, target=self._runmanager, args=(stopflag, queue)) thread.start() def _stopmanager(self): stopflag, self._stopflag = self._stopflag, None thread, self._thread = self._thread, None if not thread: return thread.should_die() stopflag.set() thread.join() def _runmanager(self, stopflag, queue): while not stopflag.isSet(): trigger = queue.get(1) if trigger is not NOTHING: trigger() else: msglog.log('broadway', msglog.types.INFO, 'Trigger Manager exiting run.') print 'Trigger Manager run exiting.' return
class AlarmEvent(Event):
    # A single alarm instance modeled as a small state machine: each
    # incoming AlarmAction/StateEvent folds into a "current" state event,
    # with superseded states accumulated in a private history list.
    implements(IAlarmEvent)
    # class-level cache of the Alarm Manager node, resolved lazily on the
    # first instantiation.
    manager = None
    security = SecurityInformation('View', 'Configure', 'Private')
    secured_by(security)

    def __init__(self, alarm, origin=None, guid=None):
        super(AlarmEvent, self).__init__(alarm, origin, guid)
        if AlarmEvent.manager is None:
            AlarmEvent.manager = as_node('/services/Alarm Manager')
        self.__history = []
        self.current_event = None
        if self.is_local():
            # local alarms: subscribe to the alarm's own dispatcher and
            # dispatch state changes back through it.
            self._dispatcher = alarm
            self._subscription = alarm.dispatcher.register_for_type(
                self.handle_event, AlarmAction)
        else:
            # remote alarms: state changes go through the Alarm Manager.
            self._dispatcher = self.manager
            self._subscription = None
        self.debug = 0
        self.message('created.')

    def get_audit(self):
        # full ordered trail: history plus the current state event
        return self.get_history() + [self.current_event]

    def first_action(self):
        return self.get_audit()[0].action

    def last_action(self):
        return self.get_audit()[-1].action

    def created(self):
        return self.first_action().timestamp

    def modified(self):
        return self.last_action().timestamp

    def get_history(self):
        # defensive copy so callers cannot mutate our record
        return self.__history[:]
    history = property(get_history)

    def handle_event(self, event):
        """Fold `event` into the state machine.

        StateEvents replace the current state outright; otherwise the
        current state event is called with the action to produce the next
        state (or AlarmEventRaised if there is no current state).  Returns
        True when the state changed, False otherwise.
        """
        previous = self.current_event
        if isinstance(event, StateEvent):
            next = event
        elif previous:
            # delegate the transition to the current state object
            next = previous(event)
        else:
            next = AlarmEventRaised(self, event)
        if next is not previous:
            if previous is not None:
                self.__history.append(previous)
            self.current_event = next
            if isinstance(next, AlarmEventClosed) and self._subscription:
                # closed alarms stop listening for further actions
                self.source.dispatcher.unregister(self._subscription)
                self._subscription = None
            self._dispatcher.dispatch(next)
            return True
        return False

    security.protect('acknowledge', 'Override')
    def acknowledge(self, actuator, timestamp, context):
        event = AlarmEventAcknowledged(self, timestamp, actuator, context)
        self.handle_event(event)

    # NOTE(review): this protect names 'notify' but precedes
    # `acknowledged`; protect() appears to bind by method name (as in the
    # other classes), so placement may be harmless -- confirm.
    security.protect('notify', 'Override')
    def acknowledged(self):
        return any(self.is_state(state) for state in ("accepted", "closed"))

    def asitem(self):
        # flatten this alarm into a plain dict for UI/serialization
        item = {}
        item["id"] = self.GUID
        item["name"] = self.source.name
        item["state"] = self.state
        item["origin"] = self.origin
        item["priority"] = self.source.priority
        item["categories"] = ["alarm", "event"]
        item["createdUTC"] = self.created()
        item["modifiedUTC"] = self.modified()
        item["created"] = format(self.created())
        item["modified"] = format(self.modified())
        item["acknowledged"] = self.acknowledged()
        item["description"] = self.source.description
        item["history"] = [ev.tostring() for ev in self.get_audit()]
        return item

    def terminate(self, actuator, reason, timestamp=None):
        if not timestamp:
            timestamp = time.time()
        event = AlarmEventTerminated(self, timestamp, actuator, reason)
        self.handle_event(event)

    def notify(self, command, *args, **kw):
        # generic RPC-style entry point: invoke a named method on this event
        method = getattr(self, command)
        return method(*args, **kw)

    def synchronize(self, events):
        """Replay any events beyond what we already know about, e.g. to
        catch up a remote copy of this alarm."""
        current = self.get_history()
        if self.current_event is not None:
            current.append(self.current_event)
        newevents = events[len(current):]
        for event in newevents:
            self.handle_event(event)
        messages = map(str, newevents)
        self.message('synchronized with new events: %s.' % (messages, ))

    def get_state(self):
        return self.current_event.name
    state = property(get_state)

    def is_state(self, state):
        # NOTE(review): compares against state.upper() -- presumably
        # current_event.name is upper-case; confirm against the state
        # event classes.
        return self.state == state.upper()

    def get_alarm_event(self):
        return self

    def message(self, message, category='debug'):
        # debug-gated console trace
        if category != 'debug' or (category == 'debug' and self.debug):
            print '%s: %s' % (str(self), message)
        return
class BackupRegistry(CompositeNode):
    # Registry of BackupEntry children describing filesystem paths to
    # include in (tar) backups; dynamic entries are persisted via a PDO.
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self):
        CompositeNode.__init__(self)
        # dynamic entries represents BackupEntry's added on the fly by other
        # interfaces\services. As opposed to static entries that are added
        # w\ configTool, these will require pdo's.
        self._dynamic_entries = []

    # single use, read -> close -> destroyed wrapper for a file (tarball)
    # object.
    class SimpleFileWrapper:
        def __init__(self, f_name, f_path):
            self.f_name = f_name
            self._f_path = f_path
            self._fd = open(f_path, 'r')

        def read(self, size):
            return self._fd.read(size)

        def close(self):
            # closing deletes the temporary tarball from disk
            self._fd.close()
            os.unlink(self._f_path)

    def start(self):
        CompositeNode.start(self)
        self.pdo = _PersistentData(self)
        if self.pdo.entries:
            # restore dynamically-registered entries from persistence
            self._load_registry()

    def register(self, name, path, bc='Other'):
        """Add a dynamic BackupEntry child and persist its config."""
        e = BackupEntry()
        cd = {'name': name, 'path': path, 'backup_class': bc}
        # save cd so we can recreate from pdo - not interested in preserving parent
        self._dynamic_entries.append(cd)
        self._save_registry()
        cd['parent'] = self
        e.configure(cd)
        e.start()

    def get_registered_classes(self):
        # unique backup_class values across all entries
        r = {}
        for entry in self.children_nodes():
            r[entry.backup_class] = ''
        return r.keys()

    security.protect('generate_backup', 'Configure')
    def generate_backup(self, backup_name, bc=['All']):
        """Tar up every entry whose backup_class matches `bc` and return a
        SimpleFileWrapper over the temporary tarball (None if nothing
        matched).

        NOTE(review): backup_name and entry paths are interpolated into a
        shell command run via os.system -- shell-injection risk if any of
        them is attacker-controlled; confirm callers sanitize.
        """
        tar_file_list = ''
        exclude_list = ""
        for entry in self.children_nodes():
            if backup_class['All'] in bc or entry.backup_class in bc:
                tar_file_list += '%s ' % entry.path
                if entry.exclude:
                    # exclusions are relative to the entry path (sans '*')
                    exclude_base = entry.path
                    if exclude_base.endswith('*'):
                        exclude_base = exclude_base[:-1]
                    for exclude in entry.exclude.split(','):
                        exclude_list += '--exclude %s ' % (os.path.join(
                            exclude_base, exclude))
        if not tar_file_list:
            return None
        tar_tmp_file_name = '%s/%s.tgz' % (properties.TEMP_DIR, backup_name)
        tar_cmd = 'tar -czPf %s %s --ignore-failed-read %s' % \
            (tar_tmp_file_name, tar_file_list, exclude_list)
        os.system(tar_cmd)
        return self.SimpleFileWrapper(backup_name, tar_tmp_file_name)

    def get_exclude_list(self, bc=['All']):
        """Return tar-style ['--exclude', path, ...] arguments for every
        entry whose backup_class matches `bc`."""
        exclude_list = []
        for entry in self.children_nodes():
            if backup_class['All'] in bc or entry.backup_class in bc:
                if entry.exclude:
                    exclude_base = entry.path
                    if exclude_base.endswith('*'):
                        exclude_base = exclude_base[:-1]
                    for exclude in entry.exclude.split(','):
                        exclude_list.append('--exclude')
                        exclude_list.append(
                            (os.path.join(exclude_base, exclude)))
        return exclude_list

    def _extract_path(self, entry):
        """Expand an entry's (possibly wildcarded) path to a space-separated
        path string honoring its exclusions; '' on any error."""
        path = ''
        try:
            if entry.path.find('*') >= 0:
                # expand wildcards and check for exclusions
                if entry.path.endswith('/*'):
                    path_root = entry.path[:-1]
                    for f in os.listdir(path_root):
                        if entry.exclude:
                            if self._in_exclude_list(f,
                                                     entry.exclude.split(',')):
                                continue
                        mode = os.stat(os.path.join(path_root, f))[ST_MODE]
                        if S_ISDIR(mode):
                            # recurse into subdirectories via tar's glob
                            path += '%s/* ' % (path_root + f)
                        else:
                            path += '%s ' % (path_root + f)
                else:
                    # NOTE(review): this branch listdir()s a path that still
                    # contains '*' and appends entry.path itself once per
                    # listing -- looks suspect; confirm intended behavior.
                    for f in os.listdir(entry.path):
                        if entry.exclude:
                            if f in entry.exclude:
                                continue
                        path += entry.path
            else:
                if os.path.exists(entry.path):
                    path = entry.path
        except:
            path = ''
        return path

    def _in_exclude_list(self, target, l):
        # membership test with leading-space-insensitive comparison
        for exclude_entry in l:
            while exclude_entry[0] == ' ':
                exclude_entry = exclude_entry[1:]
            if target == exclude_entry:
                return 1
        return 0

    def _load_registry(self):
        # pdo.entries holds a repr()'d list of config dicts; eval is only
        # ever applied to our own persisted data (see _save_registry).
        cfg_entries = eval(self.pdo.entries)
        for cd in cfg_entries:
            cd['parent'] = self
            e = BackupEntry()
            e.configure(cd)
            e.start()

    def _save_registry(self):
        self.pdo.entries = repr(self._dynamic_entries)
        self.pdo.save()
class TrendManager(CompositeNode):
    """Manages trend nodes that adapt periodic logs for graphing.

    Trends live under the 'trends' child container; their marshalled
    configurations are persisted in a PersistentDictionary guarded by
    self._pdo_lock so they survive restarts.
    """
    implements(ITrendManager)
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        super(TrendManager, self).__init__(*args)
        self.logger_url = None      # node URL of the logger service
        self.trends = None          # child container holding trend nodes
        self._pdo_lock = Lock()     # guards _trendconfig access
        self._trendconfig = None    # PersistentDictionary name -> marshal dump
        self.__running = False
        self.secured = True
        return

    def _persist_trend_configuration(self, trend):
        """Marshal and store one trend's configuration under its name."""
        self._pdo_lock.acquire()
        try:
            self._trendconfig[trend.name] = marshal(trend)
        finally:
            self._pdo_lock.release()
        return

    def _delete_trend_configuration(self, trend_name):
        """Remove a trend's persisted configuration, if present."""
        self._pdo_lock.acquire()
        try:
            if self._trendconfig.has_key(trend_name):
                del self._trendconfig[trend_name]
        finally:
            self._pdo_lock.release()
        return

    def configure(self, config):
        self.setattr('name', config.get('name', 'Trend Manager'))
        self.setattr('logger_url', config.get('logger_url',
                                              '/services/logger'))
        # Inherit the secured flag from the services container.
        self.secured = as_internal_node("/services").secured
        super(TrendManager, self).configure(config)
        return

    def configuration(self):
        config = super(TrendManager, self).configuration()
        config['logger_url'] = self.getattr('logger_url')
        return config

    def start(self):
        """Load persisted trends (migrating legacy PDO data if found) and
        prune any trend whose dump can no longer be unmarshalled."""
        try:
            self._pdo_lock.acquire()
            try:
                if self.__running:
                    return
                self.__running = True
                self._trendconfig = PersistentDictionary(filename(self),
                                                         encode=None,
                                                         decode=None)
                if not self._trendconfig:
                    # Empty dictionary: attempt one-time migration from the
                    # older PersistentDataObject storage format.
                    pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                    if os.path.exists(pdodata.filename()):
                        msglog.log('broadway', msglog.types.INFO,
                                   "Migrating previous trend data")
                        pdodata.trends = {}
                        pdodata.load()
                        self._trendconfig.update(pdodata.trends)
                    del (pdodata)
            finally:
                self._pdo_lock.release()
            super(TrendManager, self).start()
            self.logger = node.as_internal_node(self.logger_url)
            # Ensure the 'trends' child container exists.
            if self.has_child('trends'):
                self.trends = self.get_child('trends')
            else:
                self.trends = CompositeNode()
                self.trends.configure({'parent': self, 'name': 'trends'})
                self.trends.start()
            # Instantiate each persisted trend; collect the ones whose
            # dumps fail to unmarshal so they can be cleaned up below.
            corrupt_trends = []
            for trendname, trenddump in self._trendconfig.items():
                msg = "Loading trend: %s" % trendname
                msglog.log('trendmanager', msglog.types.INFO, msg)
                try:
                    trend = unmarshal(trenddump)
                except:
                    corrupt_trends.append(trendname)
                    msg = "Failed to load trend: %s" % trendname
                    msglog.log('trendmanager', msglog.types.ERR, msg)
                    msglog.exception(prefix='Handled')
            for trendname in corrupt_trends:
                # Best effort removal of corrupt persisted state and any
                # half-created node.
                try:
                    msg = "Deleting trend information: %s" % trendname
                    msglog.log('trendmanager', msglog.types.INFO, msg)
                    self._delete_trend_configuration(trendname)
                    if self.trends.has_child(trendname):
                        trend = self.trends.get_child(trendname)
                        trend.prune(force=True)
                except:
                    msglog.exception(prefix='Handled')
        except:
            # Any failure leaves the manager stopped so start() can be retried.
            self.__running = False
            raise
        return

    def stop(self):
        self.__running = False
        super(TrendManager, self).stop()
        return

    def is_trendable(self, log_node):
        """Return True when log_node looks like a periodic log the embedded
        graph can display (<= MAX_TRENDABLE_COLUMNS magnitude columns and a
        timestamp column)."""
        if log_node.configuration().has_key('period'):
            # Assume a log with a period is valid.
            return True
        if not log_node.has_child('columns'):
            # If the log does not have a columns container, then it doesn't
            # look like a proper log.
            return False
        column_nodes = log_node.get_child('columns').children_nodes()
        if not column_nodes:
            # If the log does not have any columns, then it doesn't
            # look like a proper log.
            return False
        if len(column_nodes) > MAX_TRENDABLE_COLUMNS:
            # If the log has more than MAX_TRENDABLE_COLUMNS (9), then the
            # EmbeddedGraph can not display it.
            return False
        has_timestamp = False
        for column in column_nodes:
            column_configuration = column.configuration()
            if not column_configuration.has_key('name'):
                # OK, this should NEVER happen...
                return False
            if column_configuration['name'] == 'timestamp':
                has_timestamp = True
            if not column_configuration.has_key('conversion'):
                # To be safe, each column must have a conversion...
                return False
            if column_configuration['conversion'] != 'magnitude':
                # And the conversion must be a 'magnitude'
                return False
        if not has_timestamp:
            # Graph requires a timestamp.
            return False
        return True

    security.protect('get_trends', 'View')

    def get_trends(self):
        """Return all trend nodes, auto-creating adapters for any trendable
        logger child that does not yet have one.

        @raise ENotRunning: when the manager has not been started.
        """
        if not self.__running:
            raise ENotRunning()
        trend_names = []
        for name in self.trends.children_names():
            trend_names.append(name)
        # Autodiscover: wrap each trendable log that lacks an adapter.
        for log_node in self.logger.children_nodes():
            trend_name = log_node.name
            if not trend_name in trend_names:
                if (self.is_trendable(log_node)):
                    trend_adapter = PeriodicLogTrendAdapter()
                    trend_adapter.configure({
                        'parent': self.trends,
                        'name': trend_name,
                    })
                    trend_adapter.start()
                    self._persist_trend_configuration(trend_adapter)
                    trend_names.append(trend_name)
        trend_names.sort()
        trends = []
        for trend_name in trend_names:
            trends.append(self.trends.get_child(trend_name))
        return trends

    security.protect('get_trend', 'View')

    def get_trend(self, trend_name):
        """Return the named trend, running autodiscovery once if missing."""
        if not self.__running:
            raise ENotRunning()
        if not self.trends.has_child(trend_name):
            # Autodiscoveresque.
            self.get_trends()
        # @fixme Raise a better exception...
        return self.trends.get_child(trend_name)

    def get_trend_preferences(self, trend_name):
        """Return display preferences for a trend with colors normalized to
        '#RRGGBB' strings where the stored value is an integer."""
        trend_name = urllib.unquote_plus(trend_name)
        trend = self.get_trend(trend_name)
        preferences = trend.get_preferences()
        points = trend.get_points()
        for i in xrange(0, len(points)):
            try:
                # Integer color -> hex string; non-integer colors pass through.
                points[i]["color"] = preferences["points"][i][
                    "color"] = "#%06X" % int(preferences["points"][i]["color"])
            except:
                points[i]["color"] = preferences["points"][i]["color"]
            points[i]["y-axis"] = preferences["points"][i]["y-axis"]
        preferences["points"] = points
        try:
            preferences["background"]["color"] = "#%06X" % int(
                preferences["background"]["color"])
        except:
            pass
        try:
            preferences["text"]["color"] = "#%06X" % int(
                preferences["text"]["color"])
        except:
            pass
        msglog.log("broadway", msglog.types.INFO,
                   "Preferences: %r" % preferences)
        return preferences

    security.protect('delete_trend', 'Configure')

    def delete_trend(self, trend_name):
        """Remove a trend node and its persisted configuration."""
        if not self.__running:
            raise ENotRunning()
        self._delete_trend_configuration(trend_name)
        if not self.trends.has_child(trend_name):
            # Autodiscoveresque.
            self.get_trends()
        # @fixme Raise a better exception...
        trend = self.trends.get_child(trend_name)
        trend.prune()
        trend.destroy()
        return

    security.protect('update_trend', 'Configure')

    def update_trend(self, trend_name, new_cfg, **kw):
        """Reconfigure a trend, rolling back to the original configuration
        on failure.

        Keyword args: confirmed (currently unused - see disabled block),
        deletedata (truthy deletes existing trend data first).
        """
        if not self.__running:
            raise ENotRunning()
        confirmed = kw.get('confirmed', 0)
        deletedata = kw.get('deletedata', 0)
        trend = self.get_trend(trend_name)
        confirmation = ConfirmUpdateTrend(trend, new_cfg)
        #@fixme, dleimbro
        # Confirmation flow intentionally disabled; original condition kept
        # in the comment below for when it is re-enabled.
        if 0:  #not confirmed and confirmation.requires_confirmation():
            return confirmation
        if confirmation.configuration_changed():
            try:
                if deletedata:
                    trend.delete_existing_data()
                if confirmation.requires_stop_and_restart():
                    trend.stop()
                trend.configure(confirmation.configuration())
                if confirmation.requires_stop_and_restart():
                    trend.start()
            except:
                # Roll back: restore the original configuration and restart,
                # then re-raise the original failure.
                msglog.exception()
                try:
                    trend.stop()
                except:
                    msglog.exception()
                trend.configure(confirmation.original_configuration())
                trend.start()
                raise
            else:
                self._persist_trend_configuration(trend)
        return None

    def _new_trend(self, name):
        """Return a started-free Trend instance (no points) parented under
        the trends container."""
        if not self.__running:
            raise ENotRunning()
        new_trend = Trend()
        period = 60
        points = []
        preferences = {}
        new_trend.configure({
            'parent': self.trends,
            'name': name,
            'period': period,
            'points': points,
            'preferences': preferences
        })
        return new_trend

    security.protect('new_trend', 'Configure')

    def new_trend(self, name=None):
        """Create a new trend; generates a unique name when none is given."""
        if name:
            return self._new_trend(name)
        while True:
            try:
                new_trend = self._new_trend(self.generate_trend_name())
                break
                #was going into loop and generating thousands of trends.
                #This breaks loop when an unused (generated) trend name is found
            except ENameInUse:
                continue
        return new_trend

    security.protect('generate_trend_name', 'View')

    def generate_trend_name(self):
        """Return the first unused name of the form 'Trend <n>'."""
        i_trend = 1
        while True:
            try:
                self.get_trend('Trend %d' % i_trend)
                i_trend += 1
            except ENoSuchName:
                break
        return ('Trend %d' % i_trend)
class ExportersConfigurator(CompositeNode):
    """Web-facing configurator for Alarm Exporter nodes.

    Exporter configurations are persisted as pickled IPickles dumps in a
    PersistentDataObject guarded by self._pdo_lock, and recreated at start().
    """
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        self._pdo_lock = Lock()  # guards self._pdo access
        self.manager = None      # set at start(); the exporter container node
        super(ExportersConfigurator, self).__init__(*args)

    def configure(self, config):
        self.setattr('path', config.get('path', '/exportconfig'))
        self.setattr('container',
                     config.get('container', '/services/Alarm Exporters'))
        # Inherit the secured flag from the services container.
        self.secured = as_internal_node("/services").secured
        super(ExportersConfigurator, self).configure(config)

    def configuration(self):
        config = super(ExportersConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['container'] = self.getattr('container')
        return config

    def start(self):
        """Resolve the container node and re-instantiate persisted exporters."""
        self.container = self.nodespace.as_node(self.container)
        self._pdo_lock.acquire()
        try:
            self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
            self._pdo.exporters = {}
            self._pdo.load()
            exporterdumps = self._pdo.exporters.values()
        finally:
            self._pdo_lock.release()
        super(ExportersConfigurator, self).start()
        tstart = time.time()
        # SECURITY NOTE: cPickle.loads of persisted data; safe only because
        # the dumps are generated locally by updatepdo().
        for exporterdump in exporterdumps:
            IPickles(cPickle.loads(exporterdump))()
        tend = time.time()
        tlapse = tend - tstart
        msglog.log(
            'broadway', msglog.types.INFO, 'Exporter Configurator loaded '
            '%s exporters in %s seconds.' % (len(exporterdumps), tlapse))
        self.manager = self.container

    def stop(self):
        super(ExportersConfigurator, self).stop()
        self.container = None

    def match(self, path):
        # True when the request path is routed to this configurator.
        return path.startswith(self.path)

    security.protect('create_node', 'Configure')

    def create_node(self, name, config=()):
        """Create, configure and start a new AlarmExporter; returns its name."""
        config = dict(config)
        config.setdefault("name", name)
        config.setdefault("parent", self.manager)
        exporter = self.manager.nodespace.create_node(AlarmExporter)
        exporter.configure(config)
        exporter.start()
        self.updatepdo()
        return exporter.name

    security.protect('remove_node', 'Configure')

    def remove_node(self, name):
        """Prune the named exporter and persist the change; returns its name."""
        exporter = self.manager.get_child(name)
        exporter.prune()
        self.updatepdo()
        return exporter.name

    security.protect('configure_node', 'Configure')

    def configure_node(self, name, config):
        """Reconfigure the named exporter and persist; returns its name."""
        exporter = self.manager.get_child(name)
        exporter.configure(config)
        self.updatepdo()
        return exporter.name

    security.protect('node_configuration', 'View')

    def node_configuration(self, name, extended=False):
        # 'extended' is accepted for interface compatibility but unused here.
        exporter = self.manager.get_child(name)
        return exporter.configuration()

    security.protect('configure_formatter', 'Configure')

    def configure_formatter(self, exporter, config):
        return self.configure_node(exporter, {"formatter": config})

    security.protect('formatter_configuration', 'View')

    def formatter_configuration(self, exporter, extended=False):
        return self.node_configuration(exporter).get("formatter", {})

    security.protect('configure_transporter', 'Configure')

    def configure_transporter(self, exporter, config):
        return self.configure_node(exporter, {"transporter": config})

    security.protect('transporter_configuration', 'View')

    def transporter_configuration(self, exporter, extended=False):
        return self.node_configuration(exporter).get("transporter", {})

    security.protect('trigger_configuration', 'View')

    def trigger_configuration(self, name=None):
        """Return per-source event lists; seeded with every alarm source and
        overlaid with the named exporter's triggers when name is given."""
        manager = self.nodespace.as_node('/services/Alarm Manager')
        sources = [manager] + manager.get_alarms()
        configuration = dict([(source.url, []) for source in sources])
        if name:
            exporter = self.manager.get_child(name)
            configuration.update(exporter.trigger_configuration())
        configs = []
        for source, events in configuration.items():
            configs.append({"source": source, "events": events})
        return configs

    security.protect('configure_triggers', 'Configure')

    def configure_triggers(self, name, triggers=()):
        """Replace the named exporter's trigger map from a list of
        {'source': ..., 'events': ...} dicts and persist."""
        configuration = {}
        for config in triggers:
            configuration[config["source"]] = config["events"]
        exporter = self.manager.get_child(name)
        exporter.configure_triggers(configuration)
        self.updatepdo()

    security.protect('get_node_names', 'View')

    def get_node_names(self):
        return self.manager.children_names()

    def updatepdo(self):
        """Re-pickle every exporter into the PDO under its name and save."""
        exporters = {}
        self._pdo_lock.acquire()
        try:
            for exporter in self.manager.get_exporters():
                exporters[exporter.name] = cPickle.dumps(IPickles(exporter))
            self._pdo.exporters = exporters
            self._pdo.save()
        finally:
            self._pdo_lock.release()

    def handle_request(self, request):
        """Dispatch a web request (add/remove/edit/configure/actionName)
        and respond with the rendered adapter for the affected node.

        NOTE(review): 'add' and 'remove' branches call self.create_exporter /
        self.remove_exporter, which are not defined in this class as shown -
        presumably provided elsewhere; verify before relying on those paths.
        """
        update_pdo = False
        response = Response(request)
        # Merge POST body and query string parameters.
        request_data = request.get_post_data_as_dictionary()
        request_data.update(request.get_query_string_as_dictionary())
        if request_data.has_key('add'):
            adapt = self.create_exporter("New Exporter")
        elif request_data.has_key('remove'):
            name = urllib.unquote_plus(request_data['remove'][0])
            self.remove_exporter(name)
            adapt = self.container
        elif request_data.has_key('edit'):
            name = urllib.unquote_plus(request_data['edit'][0])
            update_pdo = False
            adapt = self.container.get_exporter(name)
        elif request_data.has_key('configure'):
            name = urllib.unquote_plus(request_data['configure'][0])
            exporter = self.container.get_exporter(name)
            # Form fields arrive as 'Section.attr'; bucket them by section.
            config = {'Exporter': {}, 'Formatter': {}, 'Transporter': {}}
            for attrname in request_data.keys():
                splitname = attrname.split('.')
                if len(splitname) == 2 and config.has_key(splitname[0]):
                    config[splitname[0]][splitname[1]] = urllib.unquote_plus(
                        request_data[attrname][0])
            exportconfig = config['Exporter']
            exportconfig['formatter'] = config['Formatter']
            exportconfig['transporter'] = config['Transporter']
            exporter.configure(exportconfig)
            update_pdo = True
            adapt = exporter
        else:
            adapt = self.container
        if request_data.has_key('actionName'):
            # Invoke an arbitrary action method on the target exporter.
            target = urllib.unquote_plus(request_data.get('target')[0])
            action = urllib.unquote_plus(request_data.get('actionName')[0])
            params = map(urllib.unquote_plus, request_data.get('params'))
            exporter = self.container.get_exporter(target)
            method = getattr(exporter, action)
            result = method(*params)
            update_pdo = True
        if update_pdo:
            self.updatepdo()
        webadapter = IWebContent(adapt)
        response.send(webadapter.render())
class BACnetOverrideableProperty(BACnetProperty):
    """BACnet property exposing its 16-level priority array for overrides.

    A NodeProxy child named 'Priority Array' mirrors the subject's
    BAC_PRIORITY_ARRAY; override/release write through its level children.
    """
    security = SecurityInformation.from_default()
    secured_by(security)
    implements(IAliasNode)

    def start(self):
        # Lazily create the proxy child pointing at the subject's
        # priority-array node the first time this property starts.
        if not self.has_child('Priority Array'):
            pa = NodeProxy()
            cd = {
                'name': 'Priority Array',
                'parent': self,
                'node_url':
                    self.get_subject().get_child(
                        BAC_PRIORITY_ARRAY).as_node_url()
            }
            pa.configure(cd)
            pa.start()
        self.priority_array = self.get_child('Priority Array')
        BACnetProperty.start(self)

    def set(self, value):
        # Plain (non-prioritized) write to the underlying property.
        self.get_subject().set(value)

    security.protect('get_override', 'View')

    def get_override(self):
        """Return an OverrideDict of levels '1'..'16' -> current values
        (converted via as_magnitude when available), plus the relinquish
        default.  Logs and re-raises if the priority array cannot be read."""
        try:
            ovr = self.priority_array.get()
        except:
            msg = 'Unable to get override for device %s' % \
                  (self.as_node_url())
            msglog.log('Entity Manager', msglog.types.WARN, msg)
            msglog.exception()
            raise
        pa = {}
        for idx in range(16):
            value = ovr[idx]
            if hasattr(value, 'as_magnitude'):
                try:
                    value = value.as_magnitude()
                except:
                    pass  # best effort conversion; keep the raw value
            # BACnet priority levels are 1-based.
            pa[str(idx + 1)] = value
        return OverrideDict(pa, self.get_default())

    security.protect('override', 'Override')

    def override(self, value, level=16):
        """Write value at the given priority level (default 16, lowest).

        value may be a plain value, a dict (treated as a full OverrideDict
        of levels to write), or a string coercible to float.  For historical
        reasons, overriding with None behaves like release(level).

        @raise ENotRunning: when the node is not running.
        """
        if not self.is_running():
            raise ENotRunning()
        if isinstance(value, dict):
            value = OverrideDict(value, self.get_default())
        override_list = []
        if isinstance(value, OverrideDict):
            # Only write the levels that differ from the current state.
            current_ovr = self.get_override()
            for pa_level in range(1, 16 + 1):
                ovr = value.get_override(pa_level)
                if isinstance(ovr, StringType):
                    try:
                        ovr = float(ovr)
                    except:
                        pass  # non-numeric strings are written as-is
                if ovr != current_ovr.get_override(pa_level):
                    override_list.append((ovr, pa_level))
        else:
            if isinstance(value, StringType):
                try:
                    value = float(value)
                except:
                    pass
            override_list.append((value, level))
        for value, level in override_list:
            # for historical reasons, override(None, level) has the same behavior
            # as release(level)
            if value is None or value == 'None':
                self.release(level)
                continue
            try:
                self.priority_array.get_child(str(level)).set(value)
            except:
                # Best effort: log the failure and continue with other levels.
                msg = 'Unable to override device %s at level %d' % \
                      (self.as_node_url(), level)
                msglog.log('Entity Manager', msglog.types.WARN, msg)
                msglog.exception()

    security.protect('release', 'Override')

    def release(self, level):
        """Relinquish the override at the given priority level (set None)."""
        if not self.is_running():
            raise ENotRunning()
        try:
            self.priority_array.get_child(str(level)).set(None)
        except:
            # Best effort: failures are logged, not raised.
            msg = 'Unable to release property %s at level %d' % \
                  (self.as_node_url(), level)
            msglog.log('Entity Manager', msglog.types.WARN, msg)
            msglog.exception()

    security.protect('get_write_priority', 'View')

    def get_write_priority(self):
        """Return the highest-priority (lowest-numbered) level holding a
        non-None value, or None when nothing is overridden."""
        pa = self.priority_array.get()
        write_priority = None
        for idx in range(0, 16):
            if hasattr(pa[idx], 'as_magnitude'):
                value = pa[idx].as_magnitude()
            else:
                value = pa[idx]
            if not value is None:
                write_priority = idx + 1
                break
        return write_priority

    security.protect('set_default', 'Override')

    def set_default(self, value):
        """Set the subject's relinquish default value."""
        if not self.is_running():
            raise ENotRunning()
        self.get_subject().get_child(BAC_RELINQUISH_DFLT).set(value)

    security.protect('get_default', 'View')

    def get_default(self):
        """Return the relinquish default, or None when unreadable."""
        try:
            default = self.get_subject().get_child(
                BAC_RELINQUISH_DFLT).get()  # relinquish default
        except:
            default = None
        return default
class User(ConfigurableNode):
    """A framework user account with roles, a homepage and a password.

    Password and role changes dispatch UserPasswordModified /
    UserRolesModified events on self.dispatcher so interested services can
    react (e.g. synchronize system accounts).
    """
    implements(IUser)
    security = SecurityInformation.from_default()
    secured_by(security)
    #security.protect('roles', 'Manage Users')
    security.make_private('readonly')

    def __init__(self, *args):
        self.dispatcher = Dispatcher()
        self._lock = threading.Lock()  # guards self.roles mutation
        self.roles = []
        self.readonly = []             # attribute names locked against configure()
        self.homepage = '/'
        self.__password = ''
        self.description = ''
        super(User, self).__init__(*args)

    #security.protect('password', 'Manage Users')
    def __get_password(self):
        return self.__password

    def __set_password(self, password):
        # Dispatch a modification event only for a real change on an
        # attached (parented) user.
        previous = self.password
        self.__password = password
        if previous != self.password and self.parent is not None:
            event = UserPasswordModified(self, password, previous)
            self.dispatcher.dispatch(event)
        return

    password = property(__get_password, __set_password)

    security.protect('configure', 'View')

    def configure(self, config):
        """Apply configuration, enforcing readonly attributes and validating
        the old password against the system shadow file when changing the
        password of a system account.

        @raise Exception: "Invalid Old Password" when old_password is empty
                          or does not match the shadow entry.
        """
        for attrname in self.readonly:
            current = getattr(self, attrname, None)
            incoming = config.get(attrname)
            if None not in (current, incoming) and (current != incoming):
                message = 'Attribute "%s" is readonly for User "%s". '
                message += 'Overriding new value %s with current value %s.'
                message = message % (attrname, self.name, incoming, current)
                msglog.log('broadway', msglog.types.WARN, message)
                config[attrname] = current
        self.description = config.get('description', self.description)
        self.homepage = config.get('homepage', self.homepage)
        # Ignoring password if all asterisks (UI placeholder for "unchanged").
        password = config.get('password', "")
        if config.has_key('old_password') and config.get('old_password') == '':
            raise Exception("Invalid Old Password")
        old_password = config.get('old_password', None)
        if password and (password != len(password) * '*'):
            system_users = PasswdFile()
            system_users.load()
            if old_password and config.get('name', '') in system_users:
                # System account: the supplied old password must match the
                # shadow file entry before we accept the new one.
                system_shadow = ShadowFile()
                system_shadow.load()
                shadowentry = system_shadow[config.get('name')]
                if not shadowentry.password_matches_crypt(old_password):
                    raise Exception("Invalid Old Password")
            # The property setter dispatches UserPasswordModified; the
            # original code additionally did 'self.password = password',
            # an immediate redundant second call to the same setter
            # (a no-op since previous == new) - removed.
            self.__set_password(password)
        super(User, self).configure(config)
        if config.has_key('roles'):
            self.set_roles(list(config.get('roles', self.roles)))

    def configuration(self):
        config = super(User, self).configuration()
        config['description'] = self.description
        config['homepage'] = self.homepage
        # Never expose the real password; the UI treats '******' as "unchanged".
        config['password'] = "******"
        config['roles'] = self.get_roles()
        return config

    def start(self):
        super(User, self).start()
        self._synchronize()

    def is_removable(self):
        # Users with readonly attributes are system accounts; keep them.
        return not len(self.readonly)

    def is_configurable(self):
        return True

    security.protect('prune', 'Manage Users')

    def prune(self):
        """Remove this user; refuses for non-removable (system) accounts."""
        if not self.is_removable():
            error = '%s "%s" is not removable.'
            raise TypeError(error % (type(self).__name__, self.name))
        return super(User, self).prune()

    def _synchronize(self):
        """Drop any role that no longer exists in the role manager."""
        self._lock.acquire()
        try:
            roles = self.roles[:]
            for role in roles:
                if not self.parent.role_manager.has_role(role):
                    message = 'User "%s" ' % self.url
                    message += 'removing role "%s". ' % role
                    message += 'It does not exist.'
                    msglog.log('broadway', msglog.types.WARN, message)
                    self.roles.remove(role)
        finally:
            self._lock.release()

    security.protect('get_roles', 'View')

    def get_roles(self):
        return self.roles[:]

    security.protect('set_roles', 'Manage Users')

    def set_roles(self, *roles):
        """Replace this user's roles, enforcing mandatory roles for the
        sysadmin and anonymous accounts; dispatches UserRolesModified on
        change.

        @raise ValueError: when a requested role does not exist.
        """
        # Allow roles to be list or tuple, or many params.
        if len(roles) == 1 and isinstance(roles[0], (list, tuple)):
            roles = roles[0][:]
        for role in roles:
            # NOTE(review): validation consults self.parent.parent.role_manager
            # while every other lookup below uses self.parent.role_manager -
            # confirm which parent actually owns the role manager.
            if not self.parent.parent.role_manager.has_role(role):
                raise ValueError('Role "%s" does not exist.' % role)
        rolenames = []
        for role in roles:
            if isinstance(role, str):
                rolenames.append(role)
            else:
                rolenames.append(role.name)
        if self.parent.anonymous is not self:
            # NOTE(review): 'authenticated' is computed but never used.
            authenticated = self.parent.role_manager.authenticated.name
            if self.parent.sysadmin is self:
                # The system admin must always carry the administrator role.
                adminrole = self.parent.role_manager.administrator.name
                if adminrole not in rolenames:
                    message = 'User "%s" is system admin. Appending role "%s".'
                    msglog.log('broadway', msglog.types.WARN,
                               message % (self.name, adminrole))
                    rolenames.append(adminrole)
        elif self.parent.anonymous is self:
            # The anonymous user must always carry the unknown role.
            unknownrole = self.parent.role_manager.unknown.name
            if unknownrole not in rolenames:
                message = 'User "%s" is anonymous. Appending role "%s".'
                msglog.log('broadway', msglog.types.WARN,
                           message % (self.name, unknownrole))
                rolenames.append(unknownrole)
        self._lock.acquire()
        try:
            previous = self.roles
            if len(rolenames) == 0:
                # A user may never have zero roles; fall back to 'unknown'.
                unknownrole = self.parent.role_manager.unknown.name
                rolenames.append(unknownrole)
            self.roles = rolenames
        finally:
            self._lock.release()
        if self.roles != previous:
            event = UserRolesModified(self, self.roles, previous)
            self.dispatcher.dispatch(event)
        return
class ComparisonTrigger(Calculated):
    """Calculated trigger comparing an input node against a constant.

    Supports hysteresis (the effective constant is shifted by +/- hysteresis
    while the trigger is active) and an alarm delay (activation is deferred
    via the scheduler and cancelled if the condition clears in time).
    """
    implements(IComparisonTrigger)
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        Calculated.__init__(self, *args)
        self.deferred = None  # pending scheduler entry for a delayed alarm

    def configure(self, config):
        """Validate and apply configuration, building the comparison
        statement ('input > constant' or 'input < constant') consumed by
        the Calculated base class.

        @raise ValueError: non-numeric hysteresis/alarm_delay, or negative
                           hysteresis.
        @raise EConfigurationIncomplete: unrecognized comparison value.
        """
        set_attribute(self, 'comparison', '', config)
        set_attribute(self, 'input', '', config)
        set_attribute(self, 'constant', '', config)
        # Validate numeric fields up front to report a friendly error;
        # the values themselves are applied by set_attribute below.
        # (Previously the converted value was bound to an unused local.)
        if 'hysteresis' in config:
            try:
                float(config['hysteresis'])
            except ValueError:
                raise ValueError(
                    'Value of field \'Hysteresis\' is not numeric')
        if 'alarm_delay' in config:
            try:
                float(config['alarm_delay'])
            except ValueError:
                raise ValueError(
                    'Value of field \'Alarm delay\' is not numeric')
        set_attribute(self, 'hysteresis', 0.0, config, float)
        set_attribute(self, 'alarm_delay', 0.0, config, float)
        if self.hysteresis < 0:
            raise ValueError('Hysteresis value cannot be negative.')
        set_attribute(self, 'message', 'input is less than ${constant}',
                      config)
        if self.comparison and self.input:
            if self.comparison in ('greater_than', '>', 'input > constant'):
                self._comparison_operator = '>'
            elif self.comparison in ('less_than', '<', 'input < constant'):
                self._comparison_operator = '<'
            else:
                raise EConfigurationIncomplete('comparison')
            statement = 'input %s constant' % self._comparison_operator
            config['variables'] = [{
                'vn': 'input',
                'node_reference': config['input']
            }, {
                'vn': 'constant',
                'node_reference': config['constant']
            }]
            config['statement'] = statement
            config['critical_input'] = 'input'
        return Calculated.configure(self, config)

    security.protect('get_constant', 'View')

    def get_constant(self):
        """Return the effective comparison constant (hysteresis-adjusted)."""
        return self.local_context['constant']

    security.protect('set_constant', 'Override')

    def set_constant(self, const):
        """Set the effective comparison constant in the evaluation context."""
        self.local_context['constant'] = const

    def configuration(self):
        config = Calculated.configuration(self)
        get_attribute(self, 'comparison', config)
        get_attribute(self, 'input', config)
        get_attribute(self, 'constant', config)
        get_attribute(self, 'hysteresis', config, str)
        get_attribute(self, 'alarm_delay', config, str)
        return config

    def start(self):
        super(ComparisonTrigger, self).start()
        # Cache the configured constant; hysteresis adjustments are applied
        # relative to this baseline.
        self._constant = float(self.constant)

    def stop(self):
        super(ComparisonTrigger, self).stop()
        self._constant = None

    def _dispatch(self, event):
        """Apply hysteresis/alarm-delay policy, then forward the event.

        TriggerActivated with alarm_delay schedules a deferred dispatch;
        TriggerCleared cancels any pending deferred dispatch.
        """
        if self.hysteresis != 0:
            comparison = self._comparison_operator
            if isinstance(event, TriggerActivated):
                # Comparison is True: widen the band so minor jitter around
                # the threshold does not re-trigger.
                if comparison == '>':
                    # Input is > constant
                    self.set_constant(self._constant - self.hysteresis)
                elif comparison == '<':
                    # Input is < constant
                    self.set_constant(self._constant + self.hysteresis)
                else:
                    # Fixed message typo (was 'uknnown').
                    raise ValueError('Operator %s unknown.' % comparison)
            elif isinstance(event, TriggerCleared):
                # Comparison is False, clear hysteresis
                self.set_constant(self._constant)
            else:
                raise TypeError('Event of unknown type.')
        if self.alarm_delay:
            self.synclock.acquire()
            try:
                if isinstance(event, TriggerActivated):
                    self.deferred = scheduler.after(self.alarm_delay,
                                                    self._deferred_dispatch,
                                                    (event, ))
                elif isinstance(event, TriggerCleared) and self.deferred:
                    # Condition cleared before the delay elapsed: cancel.
                    scheduled, self.deferred = self.deferred, None
                    if scheduled:
                        scheduled.cancel()
                else:
                    super(ComparisonTrigger, self)._dispatch(event)
            finally:
                self.synclock.release()
        else:
            super(ComparisonTrigger, self)._dispatch(event)

    def _deferred_dispatch(self, event):
        """Scheduler callback: dispatch the delayed activation unless it was
        cancelled in the meantime."""
        self.synclock.acquire()
        try:
            if self.deferred:
                super(ComparisonTrigger, self)._dispatch(event)
                self.deferred = None
        finally:
            self.synclock.release()
class TriggersConfigurator(CompositeNode):
    """Configurator that creates/edits trigger nodes under a Trigger Manager
    and persists their marshalled dumps in a PersistentDictionary."""
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        self._triggers = None         # PersistentDictionary name -> dump
        self.security_manager = None  # resolved at start() when secured
        self._pdo_lock = Lock()       # guards _triggers setup
        super(TriggersConfigurator, self).__init__(*args)

    def configure(self, config):
        self.setattr('path', config.get('path', '/triggerconfig'))
        self.setattr('manager',
                     config.get('container', '/services/Trigger Manager'))
        # Inherit the secured flag from the services container.
        self.secured = as_internal_node("/services").secured
        super(TriggersConfigurator, self).configure(config)

    def configuration(self):
        config = super(TriggersConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['manager'] = self.getattr('manager')
        return config

    def start(self):
        """Open (or migrate) persisted trigger data and instantiate the
        stored triggers; resolve the security manager when secured."""
        filename = '%s (%s)' % (self.name, 'triggers')
        self.manager = self.nodespace.as_node(self.manager)
        self._pdo_lock.acquire()
        try:
            if self._triggers is None:
                self._triggers = PersistentDictionary(filename,
                                                      encode=None,
                                                      decode=None)
                if not self._triggers:
                    # Empty dictionary: one-time migration from the older
                    # PersistentDataObject storage format.
                    pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                    if os.path.exists(pdodata.filename()):
                        msglog.log('broadway', msglog.types.INFO,
                                   "Migrating previous trigger data.")
                        pdodata.triggers = {}
                        pdodata.load()
                        self._triggers.update(pdodata.triggers)
                        pdodata.destroy()
                    del (pdodata)
                self._loadtriggers()
            if self.secured:
                self.security_manager = self.as_node(
                    "/services/Security Manager")
            else:
                self.security_manager = None
        finally:
            self._pdo_lock.release()
        return super(TriggersConfigurator, self).start()

    def stop(self):
        super(TriggersConfigurator, self).stop()
        self.manager = None

    def _loadtriggers(self, names=None):
        """Unmarshal and return the named persisted triggers (all when
        names is None); failures are logged and skipped."""
        triggers = []
        if names is None:
            names = self._triggers.keys()
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        for name in names:
            dump = self._triggers[name]
            try:
                trigger = unmarshal(dump)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to load trigger: %s" % name)
                msglog.exception(prefix="handled")
            else:
                triggers.append(trigger)
        return triggers

    def _storetriggers(self, triggers=None):
        """Marshal and persist the given triggers (all of the manager's when
        None); failures are logged and skipped."""
        if triggers is None:
            triggers = self.manager.get_triggers()
        elif not isinstance(triggers, (list, set, tuple)):
            triggers = [triggers]
        for trigger in triggers:
            try:
                dump = marshal(trigger)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to marshal trigger: %s" % trigger.name)
                msglog.exception(prefix="handled")
            else:
                self._triggers[trigger.name] = dump
        return triggers

    def _poptriggers(self, names=None):
        """Drop persisted dumps for the given names (or, when None, for any
        stored name no longer known to the manager); returns removed names."""
        if names is None:
            existing = set(self.manager.get_trigger_names())
            stored = self._triggers.keys()
            names = set(stored) - set(existing)
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        removed = []
        for name in names:
            try:
                self._triggers.pop(name)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to remove trigger data: %s" % name)
                msglog.exception(prefix="handled")
            else:
                removed.append(name)
        return removed

    def match(self, path):
        # True when the request path is routed to this configurator.
        return path.startswith(self.path)

    security.protect('create_trigger', 'Configure')
    security.protect('create_node', 'Configure')

    def create_trigger(self, name, config=()):
        """Create and persist a trigger of the configured type (default
        ComparisonTrigger); returns the new trigger's name."""
        config = dict(config)
        if "type" in config:
            type = config.pop("type")
        else:
            type = "ComparisonTrigger"
        if isinstance(type, str):
            # Map type name suffix to the concrete class.
            if type.endswith("ComparisonTrigger"):
                type = ComparisonTrigger
            elif type.endswith("BoundTrigger"):
                type = BoundTrigger
            else:
                # NOTE(review): runtime message typo ("Uknown") preserved;
                # fixing it would change runtime output.
                raise ValueError("Uknown type: %r" % type)
        config.setdefault("name", name)
        config.setdefault("parent", self.manager)
        trigger = self._create_trigger(type, config)
        self._storetriggers([trigger])
        return trigger.name

    create_node = create_trigger

    security.protect('remove_trigger', 'Configure')
    security.protect('remove_node', 'Configure')

    def remove_trigger(self, name):
        """Remove the named trigger and its persisted dump; returns name."""
        self._remove_trigger(name)
        self._poptriggers([name])
        return name

    remove_node = remove_trigger

    security.protect('configure_trigger', 'Configure')
    security.protect('configure_node', 'Configure')

    def configure_trigger(self, name=None, config=()):
        """Stop, reconfigure and restart the named trigger; exceptions from
        stop/start are logged and ignored so a reconfigure always completes.

        @raise TypeError: when neither name nor config['name'] is given.
        """
        config = dict(config)
        if name is None:
            if config.has_key("name"):
                name = config["name"]
            else:
                raise TypeError("configure_trigger() requires"
                                " name or configuration with name")
        trigger = self.manager.get_trigger(name)
        try:
            trigger.stop()
        except Exception, error:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on stop.')
            msglog.exception(prefix='Handled')
        trigger.configure(config)
        try:
            trigger.start()
        except Exception, error:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on start.')
            msglog.exception(prefix='Handled')
class _Trigger(CompositeNode):
    """Base trigger node maintaining a set of target node URLs.

    self.targets holds URLs, self.targetmap caches resolved nodes, and
    self.unresolvable remembers URLs already warned about so the log is
    not flooded.  Mutations of targets are serialized by self.synclock.
    """
    implements(ITrigger)
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        self.targetmap = {}          # url -> resolved node (cache)
        self.targets = set()         # configured target urls
        self.unresolvable = set()    # urls already reported as unresolvable
        self.synclock = Lock()
        super(_Trigger, self).__init__(*args)

    security.protect('get_targets', 'View')

    def get_targets(self, unresolved=False):
        """Return resolved target nodes, re-resolving pruned or previously
        unresolvable entries; with unresolved=True, unresolvable urls are
        included as plain strings.

        NOTE(review): this method mutates targetmap/unresolvable without
        taking self.synclock; confirm callers are single-threaded or accept
        the benign race.
        """
        #get targets
        targets = []
        for targeturl in self.targets:
            target = self.targetmap.get(targeturl)
            if target and not target.parent:
                # Cached node was pruned from the tree; drop it and retry.
                message = "Trigger %s resetting pruned target: %r."
                msglog.warn(message % (self.name, targeturl))
                self.targetmap.pop(targeturl)
                target = None
            if not target:
                try:
                    target = self.nodespace.as_node(targeturl)
                except KeyError:
                    # Warn only once per unresolvable url.
                    if targeturl not in self.unresolvable:
                        message = "Trigger %s Unable to resolve target: %r."
                        msglog.warn(message % (self.name, targeturl))
                        self.unresolvable.add(targeturl)
                else:
                    self.targetmap[targeturl] = target
                    self.unresolvable.discard(targeturl)
            if target:
                targets.append(target)
            elif unresolved:
                targets.append(targeturl)
        return targets

    security.protect('get_target_names', 'View')

    def get_target_names(self):
        """Return the names of all currently resolvable targets."""
        return [target.name for target in self.get_targets()]

    security.protect('add_target', 'Configure')

    def add_target(self, target):
        """Add a target (node or url); returns True if it was newly added.

        Unresolvable urls are still added (with a warning) so they can
        resolve later.

        @raise ValueError: for the root url "/".
        """
        if not isinstance(target, str):
            targeturl = as_node_url(target)
        else:
            targeturl = target
        try:
            target = self.nodespace.as_node(targeturl)
        except KeyError:
            target = None
        if targeturl == "/":
            raise ValueError("Invalid trigger target: %r" % target)
        self.synclock.acquire()
        try:
            if targeturl not in self.targets:
                self.targets.add(targeturl)
                if target:
                    self.targetmap[targeturl] = target
                else:
                    message = "Trigger %r added unresolvable target: %r"
                    msglog.warn(message % (self.name, targeturl))
                added = True
            else:
                added = False
                message = "Trigger %r not adding target %r: already exists."
                msglog.warn(message % (self.name, targeturl))
        finally:
            self.synclock.release()
        return added

    security.protect('remove_target', 'Configure')

    def remove_target(self, target):
        """Remove a target (node or url); returns True if it was present."""
        if not isinstance(target, str):
            targeturl = as_node_url(target)
        else:
            targeturl = target
        self.synclock.acquire()
        try:
            self.targets.remove(targeturl)
        except KeyError:
            removed = False
            message = "Target %s not removed from %s: does not exist."
            msglog.warn(message % (target, self))
        else:
            # Drop the cached resolution too, if any.
            try:
                self.targetmap.pop(targeturl)
            except KeyError:
                pass
            removed = True
            msglog.inform("Target %s removed from %s." % (target, self))
        finally:
            self.synclock.release()
        return removed
class Manager(CompositeNode):
    """Entity manager service.

    Manages entity mount points and partially delegates child access to
    the /aliases branch so aliased entities appear as children here.
    """
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self):
        CompositeNode.__init__(self)
        # List of (mount_point, nodepath) tuples registered by mount points.
        self._mount_points = []
        self._start_scheduler = None
        self._alias_root = None
    def configure(self, cd):
        CompositeNode.configure(self, cd)
        set_attribute(self, 'manage_aliases', 1, cd, int)
    def configuration(self):
        cd = CompositeNode.configuration(self)
        get_attribute(self, 'manage_aliases', cd)
        return cd
    # note: Manager starts entities that are located below the Aliases
    # branch. Normally start is not called on nodes that are descendents of
    # /aliases. To minimize start order
    def start(self):
        self._alias_root = as_node('/aliases')
        # if the NBM supports BACnet, defer start up of entity infrastructure
        # until BACnet starts.
        for node in as_node('/services/network').children_nodes():
            if isinstance(node, BACnet):
                if not node.is_running():
                    # see if this function is already wrapped. If it is wrapped
                    # im_self is not an attribute.
                    if hasattr(node.start, 'im_self'):
                        node.start = wrapped(node.start, None, self.do_start)
                    return
        self.do_start()
    def do_start(self):
        # Guard: may be invoked directly or via the BACnet start wrapper.
        if not self.is_running():
            CompositeNode.start(self)
    security.protect('create_polled', 'View')
    def create_polled(self, node_reference_table=None, timeout=300):
        """Create a polled subscription including all mount-point hosts."""
        if node_reference_table is None:
            node_reference_table = {}
        for mount_point, nodepath in self.get_mount_points():
            try:
                node_reference_table[nodepath] = mount_point.host
            except:
                # Best effort: a mount point without a host is only warned.
                msg = 'Failed to establish presence monitoring for %s' % nodepath
                msglog.log('Entity Manager', msglog.types.WARN, msg)
        return SM.create_polled(node_reference_table, timeout)
    def update_hosts_status(self):
        # Force a fresh read of each mount-point host's status.
        for mount_point, nodepath in self.get_mount_points():
            mount_point.host.skip_cache()
    def register_mount_point(self, mount_point, nodepath):
        self._mount_points.append((mount_point, nodepath))
    def get_mount_points(self):
        return self._mount_points
    def get_alias_root(self):
        return self._alias_root
    security.protect('destroy', 'View')
    def destroy(self, sid):
        return SM.destroy(sid)
    security.protect('poll_all', 'View')
    def poll_all(self, sid):
        return SM.poll_all(sid)
    security.protect('poll_changed', 'View')
    def poll_changed(self, sid):
        return SM.poll_changed(sid)
    def get_entities(self, root=None):
        """Return child nodes of root (path or None for self) that are entities."""
        if root == '/' or root is None:
            root = self
        elif root.startswith('/'):
            root = self.as_node(root[1:])
        else:
            root = self.as_node(root)
        return [x for x in root.children_nodes() if \
                isinstance(x, EntityTypes)]
    def get_entities_name(self, root=None):
        return [x.name for x in self.get_entities(root)]
    def get_entity_names_for_ui(self, root=None):
        return [entity.name for entity in self.get_entities(root)]
    # following methods are partially delegated to alias root
    def children_nodes(self, **options):
        return self._alias_root.children_nodes(**options) + \
               CompositeNode.children_nodes(self, **options)
    def children_names(self, **options):
        return self._alias_root.children_names(**options) + \
               CompositeNode.children_names(self, **options)
    def get_child(self, name, **options):
        # Fall back to the alias branch when the child is not local.
        try:
            return CompositeNode.get_child(self, name, **options)
        except:
            return self._alias_root.get_child(name, **options)
    def has_child(self, name, **options):
        return self._alias_root.has_child(name, **options) or \
               CompositeNode.has_child(self, name, **options)
    def has_children(self):
        return self._alias_root.has_children() or \
               CompositeNode.has_children(self)
    def resolve_mount_paths(self, entity_path):
        """Return [from_path, to_path] mapping a remote entity's mount point
        URL to its local node URL; empty strings when not applicable."""
        from_path = ''
        to_path = ''
        try:
            entity = as_node(entity_path)
            if isinstance(entity, EntityTypes) and entity.is_remote():
                mp = entity.get_mount_point()
                from_path = mp.configuration().get('mountpoint')
                if not from_path.endswith('/'):
                    # adding slash to avoid bad replaces. They are urls
                    # so a trailing / is safer.
                    from_path += '/'
                to_path = mp.as_node_url()
                if not to_path.endswith('/'):
                    to_path += '/'
        except:
            # Best effort: unresolved paths yield the empty defaults.
            pass
        return [from_path, to_path]
    def singleton_unload_hook(self):
        pass
class RoleManager(SecurityService):
    """Security service that owns the Role nodes and lazily creates the
    four built-in roles (Unknown, Operator, Manager, System Administrator)
    on first use."""
    implements(IRoleManager)
    security = SecurityInformation.from_default()
    secured_by(security)
    security.protect('add_child', 'Manage Users')
    security.protect('remove_child', 'Manage Users')
    security.protect('rename_child', 'Manage Users')
    def __init__(self, *args):
        self.__unknown = None
        self.__authenticated = None
        self.__administrator = None
        # Fix: __manager was never initialized, so reading the "manager"
        # property before start() raised AttributeError instead of
        # returning None like the other role properties.
        self.__manager = None
        self.__started = False
        super(RoleManager, self).__init__(*args)
    def __get_unknown(self):
        return self.__unknown
    unknown = property(__get_unknown)
    def __get_authenticated(self):
        return self.__authenticated
    authenticated = property(__get_authenticated)
    def __get_administrator(self):
        return self.__administrator
    administrator = property(__get_administrator)
    def __get_manager(self):
        return self.__manager
    manager = property(__get_manager)
    def get_roles(self):
        """Return all role nodes, starting the service if needed."""
        if not self.__started:
            self.start()
        children = self.children_nodes()
        return children
    def get_role(self, name):
        """Return the role node with the given name."""
        if not self.__started:
            self.start()
        child = self.get_child(name)
        return child
    def get_role_names(self):
        if not self.__started:
            self.start()
        names = self.children_names()
        return names
    def has_role(self, rolename):
        if not self.__started:
            self.start()
        return self.has_child(rolename)
    # to be used only by UI to determine if the
    # logged in user should be allowed to view
    # all the nodes or not based on the permissions.
    security.protect('is_manage_users_capable', 'Manage Users')
    def is_manage_users_capable(self):
        return True
    def start(self):
        if not self.__started:
            # Set the flag first so has_role()/get_role() calls made by
            # __create_default() do not recurse back into start().
            self.__started = True
            self.__unknown = self.__create_default('Unknown')
            self.__authenticated = self.__create_default('Operator')
            self.__manager = self.__create_default('Manager')
            self.__administrator = self.__create_default(
                'System Administrator')
            super(RoleManager, self).start()
    def __create_default(self, rolename, readonly=None):
        """Return the named role, creating it if missing, and mark the
        given attributes (default: name) read-only on it.

        Fix: previous signature used a mutable default (readonly=['name'])
        so every default role shared one list object; a None sentinel now
        yields a fresh list per call.
        """
        if readonly is None:
            readonly = ['name']
        if self.has_role(rolename):
            role = self.get_role(rolename)
        else:
            role = self.nodespace.create_node(Role)
            msglog.log('broadway', msglog.types.INFO,
                       'Role Manager created default role: "%s"' % rolename)
            config = {'parent': self, 'name': rolename}
            role.configure(config)
        role.readonly = readonly
        return role
class BoundTrigger(_Trigger):
    """Trigger bound to a source node: it interposes itself over the
    source's trigger/clear callables and fans each invocation out to its
    configured targets, while still invoking the source's originals."""
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self, *args):
        self.source = None
        # Resolved source node and its original trigger/clear callables,
        # saved so stop() can restore them.
        self._source = None
        self._source_clear = None
        self._source_trigger = None
        super(BoundTrigger, self).__init__(*args)
    def configure(self, config):
        self.source = config.get("source", self.source)
        return super(BoundTrigger, self).configure(config)
    def configuration(self):
        config = super(BoundTrigger, self).configuration()
        if self.source:
            config["source"] = self.source
        return config
    def start(self):
        if not self.is_running():
            self._source = as_node(self.source)
            # Only interpose if not already bound to this instance
            # (guards against double-start).
            if self._source.trigger.im_self is not self:
                self._source_trigger = self._source.trigger
                self._source.trigger = self.trigger
            if self._source.clear.im_self is not self:
                self._source_clear = self._source.clear
                self._source.clear = self.clear
        return super(BoundTrigger, self).start()
    def stop(self):
        # Restore the source's original callables before stopping.
        if self._source:
            if self._source_trigger:
                self._source.trigger = self._source_trigger
                self._source_trigger = None
            if self._source_clear:
                self._source.clear = self._source_clear
                self._source_clear = None
            self._source = None
        return super(BoundTrigger, self).stop()
    security.protect('trigger', 'Override')
    def trigger(self, source, *args, **kw):
        """Invoke the source's original trigger, then fan out to targets."""
        result = self._source_trigger(source, *args, **kw)
        if not isinstance(source, str):
            source = as_node_url(source)
        for target in self.get_targets():
            try:
                target.trigger(source, *args, **kw)
            except:
                # One failing target must not stop fan-out to the rest.
                message = "%s failed to trigger target: %s."
                msglog.warn(message % (self, target))
                msglog.exception(prefix="handled")
        return result
    # Fix: this was security.protect('trigger', 'Override') — a copy/paste
    # error that re-protected trigger() and left clear() unprotected.
    security.protect('clear', 'Override')
    def clear(self, source, *args, **kw):
        """Invoke the source's original clear, then fan out to targets."""
        result = self._source_clear(source, *args, **kw)
        if not isinstance(source, str):
            source = as_node_url(source)
        for target in self.get_targets():
            try:
                target.clear(source, *args, **kw)
            except:
                message = "%s failed to clear target: %s."
                msglog.warn(message % (self, target))
                msglog.exception(prefix="handled")
        return result
class Entity(CompositeNode, ProxyMixin, MetaMixin):
    """Entity node: a composite that proxies a local or remote subject and
    caches the properties exposed by its PropertyContainer children."""
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self):
        CompositeNode.__init__(self)
        ProxyMixin.__init__(self)
        MetaMixin.__init__(self)
        # consideration: memory utilization. This really is redundant,
        # since the properties are also mapped into the node tree.
        self._properties = {}
        self._properties_list = []
        self._properties_loaded = False
        self._load_lock = Lock()
        self.__security_manager = None
    def configure(self, cd):
        set_attribute(self, 'display', '', cd)
        set_attribute(self, 'visible', '1', cd, as_boolean)
        if "meta" in cd:
            self.configure_meta(cd["meta"])
        CompositeNode.configure(self, cd)
    def configuration(self):
        cd = CompositeNode.configuration(self)
        get_attribute(self, 'display', cd)
        get_attribute(self, 'visible', cd)
        return cd
    def start(self):
        # the entity property is a special case. It should show up
        # in the property list, but isn't necessarily represented in the node
        # tree the way normal properties are.
        e_prop = EntityProp(name='Entity', type='Entity', label='',
                            description='', url=self.display,
                            entity=self.as_node_url())
        self.add_property(e_prop)
        if not self.is_remote():
            # Local entity: proxy itself and eagerly load properties.
            self.set_subject(self)
            self._load_properties()
        else:
            # Remote entity: proxy the node on the far side of the mount.
            mp = self.get_mount_point()
            try:
                subject = mp.as_remote_node(self.as_remote_url())
                self.set_subject(subject)
            except:
                msglog.exception()
        CompositeNode.start(self)
    def get_entities(self):
        return [x for x in self.children_nodes() if \
                isinstance(x, EntityTypes)]
    def get_entities_name(self):
        return [x.name for x in self.get_entities()]
    def get_entity_names_for_ui(self, root=None):
        # root is accepted for interface parity but unused here.
        return [
            entity.name for entity in self.get_entities() if entity.visible
        ]
    def get_display(self):
        return self.display
    def _load_properties(self):
        """Populate the property caches from PropertyContainer children.

        Idempotent; guarded by _load_lock for concurrent callers.
        """
        self._load_lock.acquire()
        try:
            if self._properties_loaded:
                return
            for prop_container in self.children_nodes():
                if not isinstance(prop_container, PropertyContainer):
                    continue
                for prop in prop_container.children_nodes():
                    as_prop = prop.as_property()
                    self._properties[(prop_container.ptype, prop.name)] = as_prop
                    as_prop_d = as_prop.as_dict()
                    if not as_prop_d in self._properties_list:
                        self._properties_list.append(as_prop_d)
            self._properties_loaded = True
        finally:
            self._load_lock.release()
    def get_property_containers(self):
        nodes = self.children_nodes()
        return [node for node in nodes if isinstance(node, PropertyContainer)]
    def get_property_count(self):
        return len(self._properties)
    def add_property(self, prop):
        # Register a property object directly (bypasses the node tree).
        self._properties[(prop.type, prop.name)] = prop
        self._properties_list.append(prop.as_dict())
    security.protect('get_property_list', 'View')
    def get_property_list(self):
        if not self._properties_loaded:
            self._load_properties()
        return self._properties_list
    def get_property_ref(self, ptype, name):
        # NOTE(review): raises AttributeError (on None) for an unknown
        # (ptype, name) pair rather than KeyError — confirm callers expect that.
        if not self._properties_loaded:
            self._load_properties()
        return self._properties.get((ptype, name)).reference
    def get_property(self, ptype, name):
        return self.get_property_ref(ptype, name).get()
    def get_property_multiple(self, prop_list):
        """Get several properties; failures are returned as the exception
        object keyed by (ptype, name) rather than raised."""
        result = {}
        for ptype, name in prop_list:
            try:
                result[(ptype, name)] = self.get_property(ptype, name)
            except Exception, e:
                result[(ptype, name)] = e
        return result
class Policy(ConfigurableNode):
    """Security policy node: maps roles to permissions for a node-URL
    context (or a regular-expression context) and ranks how specifically
    it matches a given context URL."""
    implements(IPolicy)
    security = SecurityInformation.from_default()
    secured_by(security)
    security.protect('context', 'Manage Users')
    security.protect('acquires', 'Manage Users')
    security.protect('rolemap', 'Manage Users')
    security.make_private('readonly')
    def __init__(self, *args):
        self.readonly = []
        self.__lock = RLock()
        # __active doubles as the is_running() flag.
        self.__active = Event()
        self.acquires = True
        self.context = ""
        self.filter = ""
        self.is_default = False
        self.uses_expression = False
        self.configured_expression = False
        self.rolemap = _RoleMap()
        super(Policy, self).__init__(*args)
    def get_readOnly(self, attr):
        return attr in self.readonly
    security.protect('configure', 'Manage Users')
    def configure(self, config):
        """
        Configure node from configuration dictionary.

        The policy may be configured to use regular expressions instead
        of simple node-URL contexts in two different ways.  The
        configuration dictionary may include a True/False value
        "uses_expression", in which case the context regular expression
        is taken verbatim from the "context" parameter, and an optional
        filter expression from the "filter" parameter.  Or the
        expression(s) may be encoded directly into the "context" value
        using the following format:

            REGEX: '<context regex>', FILTER: '<filter regex>'

        Note that the ", FILTER: [...]" part is completely optional and
        will default to no filter.
        """
        # Read-only attributes win over incoming values (with a warning).
        for attrname in self.readonly:
            current = getattr(self, attrname, None)
            incoming = config.get(attrname)
            if None not in (current, incoming) and (current != incoming):
                message = 'Attribute "%s" is readonly for Policy "%s". '
                message += 'Overriding new value %s with current value %s.'
                message = message % (attrname, self.name, incoming, current)
                msglog.log('broadway', msglog.types.WARN, message)
                config[attrname] = current
        self.acquires = as_boolean(config.get('acquires', self.acquires))
        if not self.is_default:
            self.is_default = as_boolean(config.get('is_default', False))
        self.configure_context(config)
        super(Policy, self).configure(config)
        # Getting set of mappings whose value is a callback function.
        # The assumption is being made here that such mappings are
        # those considered 'read-only.'
        self.__lock.acquire()
        try:
            inherent = self.rolemap.callable_subset()
            self.rolemap = _RoleMap(config.get('rolemap', self.rolemap))
            self.rolemap.update(inherent)
        finally:
            self.__lock.release()
        self.__verify_setup()
    def configure_context(self, config):
        """Resolve the context/filter/uses_expression trio from config;
        returns the resulting (context, filter) pair."""
        context = config.get('context', self.context)
        if not context:
            raise TypeError("Policy must have non-empty context")
        if config.has_key("uses_expression"):
            self.uses_expression = as_boolean(config["uses_expression"])
        else:
            self.uses_expression = False
        if self.uses_expression:
            self.context = context
            if config.has_key("filter"):
                self.filter = config["filter"]
            self.configured_expression = True
        else:
            self.configured_expression = False
            if isinstance(context, str) and context.startswith("REGEX:"):
                self.uses_expression = True
                # Regular expression encoded into context
                # as "REGEX: '<expr>', FILTER: '<expr>'.
                # NOTE(review): eval() of configuration text — assumed to
                # come only from trusted administrators; confirm.
                REGEX, FILTER = "REGEX", "FILTER"
                expressions = eval("{%s}" % context)
                self.context = expressions.get(REGEX, "")
                self.filter = expressions.get(FILTER, "")
            elif context != self.context:
                if not isinstance(context, str):
                    if ISecurityContext.providedBy(context):
                        context = context.url
                    else:
                        raise TypeError("Context must be string or object"
                                        " providing ISecurityContext.")
                self.filter = ""
                self.context = context
        return self.context, self.filter
    def context_configuration(self, config):
        # Re-encode context the same way it was originally configured.
        if self.configured_expression:
            config["context"] = self.context
            config["filter"] = self.filter
            config["uses_expression"] = "1"
        elif self.uses_expression:
            context = "REGEX: %r, FILTER: %r" % (self.context, self.filter)
            config["context"] = context
        else:
            config["context"] = self.context
        return config
    def configuration(self):
        config = super(Policy, self).configuration()
        config['acquires'] = str(self.acquires)
        config['is_default'] = as_boolean(self.is_default)
        self.context_configuration(config)
        self.__lock.acquire()
        try:
            config['rolemap'] = self.rolemap.copy()
        finally:
            self.__lock.release()
        return config
    def start(self):
        if not self.context:
            raise TypeError("Policy must have non-empty context")
        self.__lock.acquire()
        try:
            self.__verify_setup()
            self.__active.set()
        finally:
            self.__lock.release()
        super(Policy, self).start()
    def stop(self):
        # The system default policy must stay active even when stopped.
        if not self.parent.default is self:
            self.__active.clear()
        else:
            message = 'Policy "%s" is system default and has ' % self.name
            message += 'not been deactivated, although it has been stopped.'
            msglog.log('broadway', msglog.types.WARN, message)
        super(Policy, self).stop()
    def is_running(self):
        return self.__active.isSet()
    def is_removable(self):
        return not len(self.readonly)
    def is_configurable(self):
        return not self.is_default
    security.protect('prune', 'Manage Users')
    def prune(self):
        if not self.is_removable():
            error = '%s "%s" is not removable.'
            raise TypeError(error % (type(self).__name__, self.name))
        return super(Policy, self).prune()
    def __verify_setup(self):
        """Validate context resolvability, detect conflicting policies,
        and drop unknown roles/permissions from the rolemap."""
        try:
            node = as_node(self.context)
        except:
            raise EInvalidValue("Invalid Context", "%s" % self.context)
        policies = list(self.parent.get_policies())
        policies.remove(self)
        for policy in policies:
            if policy.uses_expression != self.uses_expression:
                continue
            if policy.context != self.context:
                continue
            # Policy contexts are same.
            message = ("Conflicting policy configurations detected."
                       "Policies %s and %s have same context: '%s'.")
            msglog.log("broadway", msglog.types.WARN,
                       message % (self, policy, self.context))
            if policy.is_running():
                message = "Running policy '%s' already has context: %s"
                raise TypeError(message % (policy.name, self.context))
            else:
                message = ("%s ignoring context conflict: "
                           "conflicting policy is not running.")
                msglog.log("broadway", msglog.types.WARN, message % self)
        permissions = self.parent.get_permissions()
        # Iterate a copy so entries can be deleted from self.rolemap.
        rolemap = self.rolemap.copy()
        for role, granted in rolemap.items():
            if not self.parent.parent.role_manager.has_role(role):
                message = 'Policy "%s" ' % self.url
                message += 'removing role "%s". ' % role
                message += 'It does not exist.'
                msglog.log('broadway', msglog.types.WARN, message)
                del (self.rolemap[role])
            elif isinstance(granted, (list, tuple)):
                for permission in granted:
                    if permission not in permissions:
                        message = 'Policy "%s" ' % self.url
                        message += 'removing permission "%s" ' % permission
                        message += 'from role "%s". ' % role
                        message += 'Permission does not exist.'
                        msglog.log('broadway', msglog.types.WARN, message)
                        self.rolemap[role].remove(permission)
        return
    def get_permissions(self, role):
        if not isinstance(role, str):
            role = role.name
        return self.rolemap.get(role)
    security.protect('set_permissions', 'Manage Users')
    def set_permissions(self, role, *permissions):
        ##
        # Permissions parameter can be one of three things:
        # - A single list or tuple object, whose items will
        #   replace the permissions tuple.
        # - A variable number of permission strings.
        # - A single callable object which will be called
        #   and whose return value will be returned anytime
        #   the permissions for this role are queried.
        if not isinstance(role, str):
            role = role.name
        if len(permissions) == 1:
            if type(permissions[0]) in (list, tuple):
                permissions = permissions[0][:]
            elif callable(permissions[0]):
                permissions = permissions[0]
        if not self.parent.parent.role_manager.has_role(role):
            raise ValueError('Role "%s" does not exist.' % role)
        if isinstance(permissions, (list, tuple)):
            defined = self.parent.get_permissions()
            for permission in permissions:
                if permission not in defined:
                    raise ValueError('Permission "%s" not defined.' % permission)
        self.__lock.acquire()
        try:
            # Callable (read-only) mappings always win over the new value.
            inherent = self.rolemap.callable_subset()
            self.rolemap[role] = permissions
            self.rolemap.update(inherent)
        finally:
            self.__lock.release()
        if inherent.has_key(role):
            message = 'Permissions for role "%s" in policy "%s" '
            message += 'cannot be changed. An attempt has been ignored.'
            message = message % (role, self.name)
            msglog.log('broadway', msglog.types.WARN, message)
        return
    def rank_match(self, context):
        """Return a specificity rank (0 == no match) for the context URL."""
        if not self.is_running():
            raise ENotRunning('Not started. This may mean start failed.')
        if not isinstance(context, str):
            context = ISecurityContext(context).url
        if self.uses_expression:
            rank = self.rank_expression(context)
        else:
            rank = self.rank_overlap(context)
        return rank
    def rank_expression(self, context):
        # Rank by regex match length (filter regex vetoes a match).
        context = urllib.unquote(context)
        match = re.match(self.context, context)
        if not match:
            count = 0
        elif self.filter and re.match(self.filter, context):
            count = 0
        else:
            matching = match.group()
            count = matching.count("/")
            if len(matching) > 1:
                count = count + 1
        return count
    def rank_overlap(self, context):
        # Rank by URL-prefix depth of the configured context.
        context = urllib.unquote(context)
        if not context.startswith(self.context):
            count = 0
        else:
            count = self.context.count('/')
            if len(self.context) > 1:
                count = count + 1
        return count
class Property(CompositeNode, ProxyMixin, MetaMixin):
    """Entity property node that proxies a linked node (node_url),
    delegating unknown attribute access to that subject."""
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self, node_url=None):
        # initialize _children dict here to avoid __getattr__ sending it to
        # the linked node and then mixing the children of this property,
        # (ie. the Priority Array child) into the children of the linked node
        # also prevents unintended appearance of the linked node's children
        # under this Entity node
        self._children = {}
        CompositeNode.__init__(self)
        ProxyMixin.__init__(self)
        MetaMixin.__init__(self)
        self.node_url = node_url
        self._running = False
        self.__as_property = None
        self._outstanding_attributes = []
        # __getattr__ delegation is disabled until configure() completes.
        self._configured = False #True
        self._subject = None
        self._entity = None
    def configure(self, cd):
        CompositeNode.configure(self, cd)
        if self.is_remote():
            cd['node_url'] = self.as_remote_url()
            self.set_remote_url(cd['node_url'])
        if not cd.has_key('type'):
            cd['type'] = self.parent.ptype
        for attr in PTYPE_MAP.get(self.parent.ptype)().supported_attrs():
            set_attribute(self, attr, '', cd)
        if self.node_url:
            cd['node_url'] = self.node_url #auto-discover
        if "meta" in cd:
            self.configure_meta(cd["meta"])
        set_attribute(self, 'node_url', REQUIRED, cd)
        self._configured = True
    def configuration(self):
        cd = CompositeNode.configuration(self)
        get_attribute(self, 'node_url', cd)
        if not self.is_remote():
            for attr in PTYPE_MAP.get(self.parent.ptype)().supported_attrs():
                get_attribute(self, attr, cd)
        return cd
    def start(self):
        # We don't want to call start on remote, thus avoid calling
        # superclasses start.
        self._running = True
    def stop(self):
        self._running = False
    def is_running(self):
        return self._running
    def as_property(self):
        """Return (and cache) this node as a typed property object."""
        if self.__as_property is None:
            prop_map = {}
            prop_class = PTYPE_MAP.get(self.parent.ptype)
            for attr in prop_class().supported_attrs():
                prop_map[attr] = getattr(self, attr)
            prop_map['reference'] = self.as_node_url()
            self.__as_property = prop_class(**prop_map)
        return self.__as_property
    def get_child(self, name, **options):
        # Never auto-discover: would pull the subject's children in here.
        options['auto_discover'] = False
        return CompositeNode.get_child(self, name, **options)
    def has_child(self, name, **options):
        options['auto_discover'] = False
        return CompositeNode.has_child(self, name, **options)
    ##
    # @ fixme - this is a recipe for problems. We should explicitly delegate
    # to subject rather than that being the default.
    def has_cov(self):
        supports_cov = False
        if not self.is_remote():
            try:
                sub = self.get_subject()
                if hasattr(sub, 'has_cov') and sub.has_cov():
                    supports_cov = True
            except:
                # Best effort: unresolvable subject means no COV support.
                pass
        return supports_cov
    def changing_cov(self):
        changing_cov = False
        if not self.is_remote():
            try:
                sub = self.get_subject()
                if hasattr(sub, 'changing_cov') and sub.changing_cov():
                    changing_cov = True
            except:
                pass
        return changing_cov
    def _public_interface(self):
        return self
    def nodecmp(a, b):
        # NOTE(review): 'a' is effectively self when called as a method.
        return cmp(a, b)
    def __getattr__(self, name):
        # Delegate unknown attributes to the linked subject, but only
        # after configuration so bootstrap attribute access stays local.
        if self._configured:
            subject = self.get_subject()
            if hasattr(subject, name):
                return getattr(subject, name)
        raise AttributeError(name)
    def get_subject(self):
        """Resolve (and cache) the linked node this property proxies."""
        if self._subject is None:
            mp = self.get_mount_point()
            if mp is not None:
                self._subject = mp.as_remote_node(self.node_url)
            else:
                self._subject = as_node(self.node_url)
                if isinstance(self._subject, BINObjectInstance):
                    if self._subject.parent.name == '17':
                        # a reference to a bacnet scheduler embedded
                        # in a device.
                        cd = {
                            'name': 'Scheduler',
                            'parent': self,
                            'source': 'broadway',
                            'link': self.node_url
                        }
                        sched_instance = BACnetSchedulerIface()
                        sched_instance.configure(cd)
                        self._subject = sched_instance
            self.set_subject(self._subject)
        return self._subject
    def _get_entity(self):
        # Parent is the PropertyContainer; grandparent is the Entity.
        if self._entity is None:
            self._entity = self.parent.parent
        return self._entity
    entity = property(_get_entity)
    def nodebrowser_handler(self, nb, path, node, node_url):
        """Render the nodebrowser page with an extra link to node_url."""
        if self.node_url is None:
            return nb.get_default_view(node, node_url)
        #create html for link to nodebrowser node_url
        block = ['<div class="node-section node-link">']
        block.append('<h2 class="section-name">Node Link</h2>')
        block.append('<ul class="node-link">')
        block.append('<li>')
        s = 'nodebrowser' + self.node_url
        block.append('<a href="/%s">/%s</a><br>' % (s, s))
        block.append("</li>\n</ul>")
        block.append("</div>")
        node_link = "\n".join(block)
        # get default dict from node browser and add link
        dct = nb.get_default_presentation(node, node_url)
        dct['node-link'] = node_link
        # answer the html for the modified node browser page
        return '''
            %(node-hierarchy)s
            %(node-name)s
            %(node-link)s
            %(node-children)s
            %(node-configuration)s
            %(node-persistence)s''' % dct
class AlarmManager(CompositeNode):
    """Service that owns Alarm children, dispatches alarm state events,
    and bootstraps the cloud/syndication/export/trigger subsystems."""
    implements(IAlarmManager)
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self, *args, **kw):
        super(AlarmManager, self).__init__(*args, **kw)
        self.dispatcher = Dispatcher('Alarm Manager')
        # Set to the CloudManager node during start().
        self.cloud = None
        self.max_raised = 5
        self.max_cleared = 5
        self.max_accepted = 5
        self.use_https = False
        # GUID -> alarm event for events originating on other hosts.
        self.remote_events = {}
    security.protect('add_alarm', 'Configure')
    def add_alarm(self, alarm):
        self.add_child(alarm)
    security.protect('remove_alarm', 'Configure')
    def remove_alarm(self, name):
        self.get_alarm(name).prune()
    def get_alarm(self, name):
        return self.get_child(name)
    def get_alarms(self):
        return self.children_nodes()
    def get_alarm_names(self):
        return self.children_names()
    def get_alarm_dictionary(self):
        return self.children
    def get_remote_events(self):
        return self.remote_events.values()
    def get_local_events(self):
        # Flatten the per-alarm event lists into one list.
        events = []
        alarms = self.get_alarms()
        eventlists = map(Alarm.get_events, alarms)
        map(events.extend, eventlists)
        return events
    def get_all_events(self):
        return self.get_local_events() + self.get_remote_events()
    def get_event_dictionary(self, order_by='state', source='all'):
        """Return events from source ('all'/'local'/'remote') keyed by
        state (lists) or by GUID (single events)."""
        try:
            events = getattr(self, 'get_' + source + '_events')()
        except AttributeError:
            raise ValueError('"%s" invalid value for source.' % source)
        eventdict = {}
        if order_by == 'state':
            for event in events:
                eventdict.setdefault(event.state, []).append(event)
        elif order_by == 'GUID' or order_by == 'guid':
            for event in events:
                eventdict[event.GUID] = event
        else:
            raise ValueError('"%s" invalid value for order_by' % order_by)
        return eventdict
    def get_events_by_state(self, state, source='all'):
        eventdict = self.get_event_dictionary('state', source)
        return eventdict.get(state.upper(), [])
    def handle_event_resend(self, cloudevent):
        self.message('Received Event Resend event %s' % (cloudevent))
        self.send_alarm_events_to_portal(cloudevent.origin)
    def send_alarm_events_to_portal(self, target):
        """Push every local alarm event to the portal; return the count."""
        count = 0
        for alarm in self.get_alarms():
            for event in alarm.get_events():
                self.cloud.send_event_to_portal(event, ['Alarm Manager'],
                                                target)
                count += 1
        self.message('Dispatched %s events because of Event Resend Request'
                     % count)
        return count
    def handle_cloud_event(self, cloudevent):
        # NOTE(review): result is discarded — presumably invoking the
        # cloud event performs its side effects; confirm.
        event = cloudevent()
    def dispatch(self, event, *args, **kw):
        """Dispatch an event; track remote alarm events by GUID and push
        local ones to the cloud."""
        result = self.dispatcher.timed_dispatch(event, *args, **kw)
        if isinstance(event, StateEvent):
            alarmevent = event.get_alarm_event()
            if alarmevent.is_local():
                self.cloud.handle_local_event(alarmevent, ['Alarm Manager'])
            else:
                guid = alarmevent.GUID
                if not self.remote_events.has_key(guid):
                    self.remote_events[guid] = alarmevent
                if isinstance(event, AlarmEventClosed):
                    del (self.remote_events[guid])
        return result
    def handle_cloud_change(self, event):
        # Re-announce all local alarm events after a formation change.
        count = 0
        for alarm in self.get_alarms():
            for event in alarm.get_events():
                self.cloud.handle_local_event(event, ['Alarm Manager'])
                count += 1
        self.message('Dispatched %s events because Cloud Change.' % count)
        self.dispatch(event)
        return count
    def register_for_type(self, *args, **kw):
        return self.dispatcher.register_for_type(*args, **kw)
    def configure(self, config):
        set_attribute(self, 'use_https', self.use_https, config, as_boolean)
        set_attribute(self, 'max_raised', 5, config, int)
        set_attribute(self, 'max_cleared', 5, config, int)
        set_attribute(self, 'max_accepted', 5, config, int)
        return super(AlarmManager, self).configure(config)
    def configuration(self):
        config = super(AlarmManager, self).configuration()
        get_attribute(self, 'use_https', config, str)
        get_attribute(self, 'max_raised', config, str)
        get_attribute(self, 'max_cleared', config, str)
        get_attribute(self, 'max_accepted', config, str)
        return config
    def start(self):
        """Bring up the alarm subsystem in dependency order: cloud
        manager/handlers, syndication, alarm configurator, exporters,
        cloud listeners, triggers, and finally the event store."""
        self.message('Alarm Manager starting.')
        server = self.nodespace.as_node('/services/network/http_server')
        if not server.is_enabled() or self.use_https:
            server = self.nodespace.as_node('/services/network/https_server')
        # Common base config for all HTTP request handlers below.
        config = {
            'parent': server,
            'enabled': 1,
            'secured': True,
            'debug': self.debug
        }
        syndic_handler_config = config.copy()
        config_handler_config = config.copy()
        cloud_handler_config = config.copy()
        cloud_config_handler_config = config.copy()
        exporter_handler_config = config.copy()
        trigger_handler_config = config.copy()
        syndic_handler_config['name'] = 'Syndication Viewer'
        syndic_handler_config['path'] = '/syndication'
        config_handler_config['name'] = 'Alarm Configurator'
        config_handler_config['path'] = '/alarmconfig'
        cloud_handler_config['name'] = 'Cloud Handler'
        cloud_handler_config['path'] = '/cloud'
        cloud_config_handler_config['name'] = 'Cloud Configurator'
        cloud_config_handler_config['path'] = '/cloudconfig'
        exporter_handler_config['name'] = 'Exporter Configurator'
        exporter_handler_config['path'] = '/exportconfig'
        trigger_handler_config['name'] = 'Trigger Configurator'
        trigger_handler_config['path'] = '/triggerconfig'
        ##
        # First create and setup Cloud Manager so events produced by
        # startup of configurators can be propagated properly.
        startlist = []
        from mpx.service.cloud.manager import CloudManager
        cloud_manager = self.nodespace.create_node(CloudManager)
        cloud_manager.configure({
            'name': 'Cloud Manager',
            'parent': '/services',
            'debug': self.debug
        })
        self.cloud = cloud_manager
        startlist.append(cloud_manager)
        from mpx.service.cloud import request_handler
        cloud_handler_config['manager'] = '/services/Cloud Manager'
        cloud_handler = self.nodespace.create_node(
            request_handler.CloudHandler)
        cloud_handler.configure(cloud_handler_config)
        del request_handler
        startlist.insert(0, cloud_handler)
        from mpx.service.cloud.xhtml.configuration import request_handler
        cloud_config_handler_config['manager'] = '/services/Cloud Manager'
        cloud_config_handler = self.nodespace.create_node(
            request_handler.CloudConfigurator)
        cloud_config_handler.configure(cloud_config_handler_config)
        del request_handler
        startlist.append(cloud_config_handler)
        for cloudservice in startlist:
            cloudservice.start()
        self.message(
            'Alarm Manager configured and started Cloud Manager, Handler, and Configurator.'
        )
        ##
        # Syndication Handler is idempotent and so can be started anytime.
        from mpx.service.alarms2.presentation.syndication.http import request_handler
        syndic_handler = request_handler.SyndicationViewer()
        syndic_handler.configure(syndic_handler_config)
        del request_handler
        syndic_handler.start()
        self.message(
            'Alarm Manager configured and started Syndication Handler.')
        ##
        # Startup Alarm Manager's configurator so that pickled Alarm Events
        # may be recreated.
        from mpx.service.alarms2.presentation.xhtml.configuration import request_handler
        config_handler = self.nodespace.create_node(
            request_handler.AlarmConfigurator)
        config_handler.configure(config_handler_config)
        del request_handler
        config_handler.start()
        self.message('Alarm Manager created and started Alarm Configurator.')
        ##
        # Now that old Alarm Events have been recreated, configure and
        # startup Exporters.
        from mpx.service.alarms2.export import exporter
        container = self.nodespace.create_node(exporter.ExporterContainer)
        container.configure({
            'name': 'Alarm Exporters',
            'parent': '/services',
            'debug': self.debug
        })
        export = self.nodespace.create_node(exporter.AlarmExporter)
        export.configure({
            'name': 'Alarm Logger',
            'parent': container,
            'debug': self.debug
        })
        formatter = self.nodespace.create_node(
            exporter.AlarmDictionaryFormatter)
        formatter.configure({
            'name': 'Log Formatter',
            'parent': export,
            'debug': self.debug
        })
        transporter = self.nodespace.create_node(exporter.LoggingTransporter)
        transporter.configure({
            'name': 'Alarm Logger',
            'log': '/services/logger/Alarm Log',
            'parent': export,
            'debug': self.debug
        })
        export.add_source(self, StateEvent)
        container.start()
        self.message('Created and started alarm exporters and logger.')
        from mpx.service.alarms2.export.xhtml.configuration import request_handler
        export_config_handler = self.nodespace.create_node(
            request_handler.ExportersConfigurator)
        export_config_handler.configure(exporter_handler_config)
        del request_handler
        export_config_handler.start()
        self.message(
            'Alarm Manager created and started Exporter Configurator.')
        # Register for cloud traffic now that everything can handle it.
        self.cloud.add_listener(self.handle_cloud_event, 'Alarm Manager')
        self.cloud.add_listener(self.handle_event_resend, 'EventResend')
        from mpx.service.cloud.manager import FormationUpdated
        self.cloud.dispatcher.register_for_type(self.handle_cloud_change,
                                                FormationUpdated)
        self.message(
            'Alarm Manager added itself as listender for Cloud Events.')
        ##
        # With all supporting infrastructure started, start triggers which
        # may immediately generate alarms.
        from mpx.service.alarms2.trigger import triggermanager
        trigger_manager = self.nodespace.create_node(
            triggermanager.TriggerManager)
        trigger_manager.configure({
            'name': 'Trigger Manager',
            'parent': '/services',
            'debug': self.debug
        })
        del triggermanager
        trigger_manager.start()
        self.message('Alarm Manager created and started Trigger Manager.')
        from mpx.service.alarms2.trigger.xhtml.configuration import request_handler
        trigger_config_handler = self.nodespace.create_node(
            request_handler.TriggersConfigurator)
        trigger_config_handler.configure(trigger_handler_config)
        del request_handler
        self.message('Alarm Manager created and started Trigger Configurator.')
        trigger_config_handler.start()
        # Ensure an Event Store exists, creating one if necessary.
        try:
            store = as_node("/services/Event Store")
        except KeyError:
            msglog.inform("Alarm Manager creating Event Store.")
            from mpx.service.alarms2 import store
            estore = store.EventStore()
            estore.configure({"name": "Event Store", "parent": "/services"})
            estore.start()
            msglog.inform("Alarm Manager setup and started Event Store.")
        else:
            msglog.inform("Alarm Manager found existing Event Store.")
        super(AlarmManager, self).start()
        self.message('Alarm Manager startup complete.')
    def message(self, message, mtype=msglog.types.INFO):
        # Suppress debug-class messages unless debugging is enabled.
        if (mtype != msglog.types.DB) or self.debug:
            msglog.log('broadway', mtype, message)
        return
class GlobalSetpointGroup(CompositeNode):
    ##
    # Composite node that binds a set of group setpoint items to properties
    # of one or more entities so a single value push/release fans out to all
    # mapped points.
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, **kw):
        # _entity_map: {k:v} ... k == gsp_identifer,
        #   v == [{'entity_path':'entity_prop', ...}]
        # f.e, {'unique_handle':[{'/path/to/entity':'(pt_type, pt_name)', ...}]
        self._entity_map = {}
        self._group_cfg = GroupCfg()
        # Keyword arguments may pre-seed attributes, but only attributes
        # that already exist on the instance/class.
        for name, value in kw.items():
            if hasattr(self, name):
                # BUGFIX: was setattr(self, attr, value); 'attr' is never
                # defined in this scope, so any matching keyword argument
                # raised NameError instead of being applied.
                setattr(self, name, value)

    @do_persist
    def configure(self, config):
        # 'entity_path' is required configuration; REQUIRED makes
        # set_attribute raise when it is missing.
        super(GlobalSetpointGroup, self).configure(config)
        set_attribute(self, 'entity_path', REQUIRED, config)

    def configuration(self):
        # Mirror configure(): report entity_path along with the base config.
        config = super(GlobalSetpointGroup, self).configuration()
        get_attribute(self, 'entity_path', config)
        return config

    @do_persist
    def update_group_config(self, config):
        # Replace the group configuration wholesale, promoting plain items
        # to GroupSetpointItem instances as needed.
        self._group_cfg = GroupCfg()
        for setpoint_item in config:
            if not isinstance(setpoint_item, GroupSetpointItem):
                setpoint_item = GroupSetpointItem(setpoint_item)
            self._group_cfg.append(setpoint_item)

    def get_group_config(self):
        # Return the live GroupCfg object (not a copy).
        return self._group_cfg

    ##
    # Save or update the group information associated with the referenced entity
    # with the specified configuration.
    #
    # @param config The group configuration data.
    # @return A dictionary providing information about the group configuration.
    security.protect('update_group', 'Configure')
    def update_group(self, config):
        self.update_group_config(config)
        return self.get_group_config()

    ##
    # Return a list of the entities managed by the group.
    #
    # @return A list of paths to the entities that are managed by this node.
    security.protect('get_entities_paths', 'View')
    def get_entities_paths(self):
        paths = {}  # dict used for uniqueness of keys
        for entity_maps in self._entity_map.values():
            for entity_map in entity_maps:
                paths[entity_map.entity_path] = None
        return paths.keys()

    ##
    # Add or update one or more entities to the list of entities managed by this
    # group and configure the mapping between the entities properties and setpoint
    # items.
    #
    # @param entities A list of Entity Mapping's.
# @return A list of entity mappings that are managed by this node. security.protect('update_entity_mapping', 'Configure') @do_persist def update_entity_mapping(self, entity_map): _entity_map = {} for setpoint_id, e_maps in entity_map.items(): _entity_map[setpoint_id] = [] for e_map in e_maps: if not isinstance(e_map, EntityMapping): if not e_map.has_key('setpoint_id'): e_map['setpoint_id'] = setpoint_id e_map = EntityMapping(e_map) _entity_map[setpoint_id].append(e_map) self._entity_map = _entity_map return self._entity_map ## # Retrieve the current entity map associated with this global setpoint group # # @return A list of entity mappings that are managed by this node. security.protect('get_entity_mapping', 'View') def get_entity_mapping(self): return self._entity_map ## # Delete a set of entity mappings from the entities managed by this # Global Setpoint Group. # # @param entity_map The Entity Mappings that are to be removed. # @return A list of entity mappings that are managed by this node. security.protect('remove_entity_mapping', 'Configure') @do_persist def remove_entity_mapping(self, entity_map): for setpoint_id, e_maps in entity_map.items(): if not self._entity_map.has_key(setpoint_id): continue for e_map in e_maps: if not isinstance(e_map, EntityMapping): if not e_map.has_key('setpoint_id'): emap['setpoint_id'] = setpoint_id e_map = EntityMapping(e_map) for existing_emap in self._entity_map.get(setpoint_id): if e_map == existing_emap: self._entity_map[setpoint_id].remove(existing_emap) if not self._entity_map[setpoint_id]: # empty list, delete it del self._entity_map[setpoint_id] return self._entity_map ## # Pushes the values of the setpoint items to the properties of the entities # mapped to each setpoint item. # # @return An identifier for the transaction or process id that can be # queried later to find out the progress of the process. If a GSG controls # a large number of groups, then the call will take a long time to return. 
# This strategy avoids the possible HTTP network timeout that would occur if # the framework took too long to set all of the entities. Instead, the client # will being a poll to check for the process. security.protect('push_values', 'Override') def push_values(self): command_set = [] for setpoint_item in self.get_group_config(): setpoint_id = setpoint_item.setpoint_id value = setpoint_item.value priority = setpoint_item.priority for entity_map in self.get_entity_mapping().get(setpoint_id): try: property = entity_map.get_property_reference() except: message = 'Error pushing value to %s' % ( entity_map.entity_path) msglog.log('Global Setpoint Manager', msglog.types.INFO, message) msglog.exception() continue if hasattr(property, 'override'): command = OverrideCommand(property, value, priority) elif hasattr(property, 'set'): command = SetCommand(property, value) else: continue command_set.append(command) command_set = CommandSet(command_set) COMMAND_MANAGER.enqueue(command_set) return command_set.get_transaction_id() ## # Releases the values of the setpoint items to the properties of the entities # mapped to each setpoint item. # # @return An identifier for the transaction or process id that can be # queried later to find out the progress of the process. If a GSG controls # a large number of groups, then the call will take a long time to return. # This strategy avoids the possible HTTP network timeout that would occur if # the framework took too long to set all of the entities. Instead, the client # will being a poll to check for the process. security.protect('release_setpoint', 'Override') def release_setpoint(self, setpoint_id, priority_level): command_set = [] if ((len(self.get_entity_mapping()) != 0) and (setpoint_id in self.get_entity_mapping().keys())): ## # Entity map is not empty and also contains an entry for the # setpoint_id, proceed ... 
## for entity_map in self.get_entity_mapping().get(setpoint_id): try: property = entity_map.get_property_reference() except: message = 'Error releasing override for %s' % ( entity_map.entity_path) msglog.log('Global Setpoint Manager', msglog.types.INFO, message) msglog.exception() continue if not hasattr(property, 'release'): continue command_set.append(ReleaseCommand(property, priority_level)) command_set = CommandSet(command_set) COMMAND_MANAGER.enqueue(command_set) return command_set.get_transaction_id() ## # Requests the progress of the push values for the specified transaction_id # # @param transaction_id An array of entity mappings. # @return A transaction status object containing the following attributes. # - completed: True if the process has completed, False otherwise. # - success: Present only if "completed" is set to True. True if the # process managed to set all the properties, False otherwise. # - report_items: Present only if completed is set to True. An array of # Error Report Items. security.protect('get_push_values_progress', 'View') def get_push_values_progress(self, transaction_id): return COMMAND_MANAGER.get_push_values_progress(transaction_id) ## # Creates a poll group for the values of the properties of the managed entities # in the setpoint group. # # @param # @return A string value representing the identifer of the poll group. security.protect('create_polled', 'View') def create_polled(self, node_reference_table=None, timeout=300): if node_reference_table is None: node_reference_table = {} for setpoint_id in self._entity_map.keys(): for entity_map in self._entity_map.get(setpoint_id, []): try: property = entity_map.get_property_reference() except: continue entity_path = entity_map.entity_path nrt_id = str([setpoint_id, entity_path]) node_reference_table[nrt_id] = property return SM.create_polled(node_reference_table, timeout) ## # Destroys a poll group # # @param poll_id A string value representing the identifier of the poll group. 
# @return None security.protect('destroy', 'View') def destroy(self, poll_id): return SM.destroy(poll_id) ## # Polls for the values of the entities managed by the setpoint group given the # poll_id # # @param poll_id The string value representing the identifier of the # poll group, as returned from create_polled. # @return A dictionary keyed by "(entity_path, property)", the value being a # result object. security.protect('poll_all', 'View') def poll_all(self, poll_id): return SM.poll_all(poll_id) ## # Like poll_all but returns only those entity/property pairs whose values # have changed since the last time poll_* was called, referencing a particular # poll_id # # @param poll_id The string value representing the identifier of the # poll group, as returned from create_polled. # @return A dictionary keyed by "(entity_path, property)", the value being a # result object. security.protect('poll_changed', 'View') def poll_changed(self, poll_id): return SM.poll_changed(poll_id)