Example #1
0
class NodeConfigurator(CompositeNode):
    """Composite node that persists the definitions of dynamically
    created child nodes and recreates them on startup.

    Managed nodes are stored in a PersistentDictionary keyed by node
    URL; each value is a (factory, configuration) tuple where factory
    is the dotted path of the node's class and configuration is the
    keyword dictionary used to configure it.
    """
    def __init__(self, *args, **kw):
        # Created lazily in start() so the persistent store can be
        # named after the fully configured node.
        self.nodes = None
        super(NodeConfigurator, self).__init__(*args, **kw)
    def start(self):
        """Open the persistent store and recreate all managed nodes."""
        if self.nodes is None:
            dictname = "%s (%s)" % (type(self).__name__, self.name)
            self.nodes = PersistentDictionary(dictname)
            nodeurls = self.nodes.keys()
            # Sort with pathcompare so parent URLs are processed before
            # their children, ensuring parents exist at creation time.
            nodeurls.sort(pathcompare)
            for nodeurl in nodeurls:
                factory, configuration = self.nodes[nodeurl]
                self.create_node(factory, nodeurl, **configuration)
        super(NodeConfigurator, self).start()
    def get_managed_node(self, nodeurl):
        """Return the node at 'nodeurl'.

        Raises TypeError when 'nodeurl' is not one of the nodes this
        configurator manages.
        """
        if not self.nodes.has_key(nodeurl):
            raise TypeError("cannot manipulate unmanaged node: %s" % nodeurl)
        return as_node(nodeurl)
    def node_children(self, nodeurl):
        """Return the child names of managed node 'nodeurl'."""
        node = self.get_managed_node(nodeurl)
        return node.children_names()
    def node_configuration(self, nodeurl):
        """Return the configuration dictionary of managed node 'nodeurl'."""
        node = self.get_managed_node(nodeurl)
        return node.configuration()
    def start_node(self, nodeurl):
        """Start managed node 'nodeurl'."""
        node = self.get_managed_node(nodeurl)
        node.start()
    def stop_node(self, nodeurl):
        """Stop managed node 'nodeurl'."""
        node = self.get_managed_node(nodeurl)
        node.stop()
    def node_attr(self, nodeurl, name, value=Undefined):
        """Get, and optionally set, attribute 'name' on a managed node.

        When 'value' is supplied the attribute is set and the change is
        persisted before the (new) attribute value is returned.
        """
        node = self.get_managed_node(nodeurl)
        if value is not Undefined:
            setattr(node, name, value)
            self.updatepdo(nodeurl, node)
        return getattr(node, name)
    def configure_node(self, nodeurl, config):
        """Reconfigure managed node 'nodeurl'.

        On failure, roll back to the persisted configuration.  The node
        is stopped for the duration and restarted regardless of the
        outcome; returns the node's resulting configuration.
        """
        node = self.get_managed_node(nodeurl)
        node.stop()
        try:
            node.configure(config)
        except:
            msglog.log("broadway", msglog.types.WARN, 
                       "Error prevented reconfiguration of node: %s" % node)
            msglog.exception(prefix="handled")
            msglog.log("broadway", msglog.types.WARN, 
                       "Rolling back configuration.")
            try:
                # The stored value is a (factory, configuration) tuple;
                # roll back using the configuration dictionary only.
                # (Previously the whole tuple was passed to configure().)
                factory, configuration = self.nodes[nodeurl]
                node.configure(configuration)
            except:
                msglog.log("broadway", msglog.types.WARN, 
                           "Configuration rollback failed.")
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO, 
                           "Rollback of configuration succeeded.")
        else:
            msglog.log("broadway", msglog.types.INFO, 
                       "Node reconfigured: %s" % node)
            self.updatepdo(nodeurl, node)
        finally:
            node.start()
        return node.configuration()
    def create_node(self, factory, nodeurl, **config):
        """Create, configure, persist, and start a new node at 'nodeurl'.

        'factory' is a callable or the dotted path of one.  Raises
        TypeError when a node already exists at 'nodeurl'.  Returns the
        new node's configuration.
        """
        try:
            as_node(nodeurl)
        except KeyError:
            pass
        else:
            raise TypeError("Node exists: %s" % nodeurl)
        if isinstance(factory, str):
            module,sep,name = factory.rpartition(".")
            if name:
                # NOTE: exec/eval of the stored factory path; the
                # persistent store is treated as trusted configuration.
                exec("import %s" % module)
            factory = eval(factory)
        parent,sep,name = nodeurl.rpartition("/")
        configuration = {"name": name, "parent": parent}
        configuration.update(config)
        node = factory()
        try:
            node.configure(configuration)
        except:
            msglog.log("broadway", msglog.types.WARN, 
                       "Error prevented configuration of new node: %s" % node)
            msglog.exception(prefix="handled")
            try:
                node.prune()
            except:
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO, 
                           "Node successfully pruned.")
        else:
            msglog.log("broadway", msglog.types.INFO, 
                       "New node created: %s" % node)
            self.updatepdo(nodeurl, node)
            node.start()
        # NOTE(review): on configuration failure the node may have been
        # pruned, in which case this call may itself raise — confirm
        # desired failure contract with callers.
        return node.configuration()
    def remove_node(self, nodeurl):
        """Prune managed node 'nodeurl' and drop its persisted record."""
        node = self.get_managed_node(nodeurl)
        node.prune()
        self.updatepdo(nodeurl, None)
    def updatepdo(self, nodeurl, node):
        """Update the persistent record for 'nodeurl'.

        A false 'node' removes the record; otherwise the node's factory
        path and current configuration are stored under its canonical
        URL.  Returns the (possibly canonicalized) URL.
        """
        if self.nodes.has_key(nodeurl):
            self.nodes.pop(nodeurl)
        if node:
            node = as_node(node)
            nodeurl = as_node_url(node)
            datatype = type(node)
            factory = "%s.%s" % (datatype.__module__, datatype.__name__)
            self.nodes[nodeurl] = (factory, node.configuration())
        return nodeurl
Example #2
0
class PersistanceManager(object):
    """Persists Global Setpoint Manager group data.

    Layout of the backing store:
    {nodepath: {'node_config': node_config,
                'group_config': group_config,
                'entity_map': entity_map,
                'node_factory': factory}}
    All writes are no-ops until enable_persist() is called.
    """
    def __init__(self):
        self._persisted_data = PersistentDictionary('GSPData')
        # Messages with level <= debug are logged.
        self.debug = 1
        # Guards all writes; see enable_persist()/disable_persist().
        self._persist_enabled = False
        
    def message(self, message, mtype=msglog.types.INFO, level=1):
        """Log 'message' when the debug level permits."""
        if self.debug >= level:
            msglog.log('Global Setpoint Manager', mtype, message)
        
    def persist_enabled(self):
        """Return True when writes to the persistent store are enabled."""
        return self._persist_enabled
    
    def enable_persist(self):
        self._persist_enabled = True
        
    def disable_persist(self):
        self._persist_enabled = False
        
    def get_gsp_groups(self):
        """Return persisted group paths, shallowest paths first."""
        groups = self._persisted_data.keys()
        groups.sort(lambda a,b: cmp(a.count('/'), b.count('/')))
        return groups
        
    def get_gsp_group(self, nodepath):
        """Return the data dictionary for group 'nodepath'."""
        return self._persisted_data[normalize_nodepath(nodepath)]
    
    def put_gsp_group(self, nodepath, nodedata):
        """Create (if needed) and update the group at 'nodepath'."""
        if not self.persist_enabled():
            return
        nodepath = normalize_nodepath(nodepath)
        if not self._persisted_data.has_key(nodepath):
            # create default configuration.
            data = {'node_config':{},
                    'group_config':[],
                    'entity_map':{},
                    'node_factory':''}
            self._persisted_data[nodepath] = data
        self.put_gsp_group_data(nodepath, nodedata)
        
    def remove_gsp_group(self, nodepath):
        """Delete the group at 'nodepath' if it exists."""
        nodepath = normalize_nodepath(nodepath)
        if self._persisted_data.has_key(nodepath):
            del self._persisted_data[nodepath]
            
    def get_gsp_group_data(self, nodepath):
        nodepath = normalize_nodepath(nodepath)
        return self._persisted_data[nodepath] 
    
    def put_gsp_group_data(self, nodepath, nodedata):
        """Store every entry of 'nodedata' whose key the group already has."""
        nodepath = normalize_nodepath(nodepath)
        for data_key in self._persisted_data[nodepath].keys():
            value = nodedata.get(data_key)
            if value is not None:
                self._put_entry(nodepath, data_key, value)
    
    def get_gsp_group_nconfig(self, nodepath):
        # node configuration data
        return self._get_entry(nodepath, 'node_config')
    
    def put_gsp_group_nconfig(self, nodepath, value):
        # node configuration data
        self._put_entry(nodepath, 'node_config', value)
    
    def get_gsp_group_gconfig(self, nodepath):
        # gsp group configuration data
        return self._get_entry(nodepath, 'group_config')
    
    def putt_gsp_group_gconfig(self, nodepath, value):
        # gsp group configuration data
        self._put_entry(nodepath, 'group_config', value)

    # Correctly spelled, backward-compatible alias for the misspelled
    # public name above; existing callers of putt_... keep working.
    put_gsp_group_gconfig = putt_gsp_group_gconfig
    
    def get_gsp_group_entity_map(self, nodepath):
        return self._get_entry(nodepath, 'entity_map')
    
    def put_gsp_group_entity_map(self, nodepath, value):
        self._put_entry(nodepath, 'entity_map', value)
        
    def _get_entry(self, nodepath, data_type):
        # One entry of the group's data dictionary.
        return self.get_gsp_group(normalize_nodepath(nodepath))[data_type]
        
    def _put_entry(self, nodepath, data_type, value):
        """Store one entry and notify the store so it syncs to disk."""
        if not self.persist_enabled():
            return
        nodepath = normalize_nodepath(nodepath)
        group = self.get_gsp_group(nodepath)
        # NOTE: assert is stripped under python -O; the group is
        # expected to exist (created via put_gsp_group) before writes.
        assert group, \
        'A group must exist before data can be stored against it.'
        group[data_type] = value
        self._persisted_data.notify_changed(nodepath)        
            
    def singleton_unload_hook(self):
        # Intentionally a no-op; required by the singleton framework.
        pass
Example #3
0
class TriggersConfigurator(CompositeNode):
    """Service node that persists trigger definitions and exposes
    secured create/configure/remove operations for them.

    Trigger dumps are stored in a PersistentDictionary keyed by
    trigger name; legacy PersistentDataObject data is migrated on
    first start.
    """
    security = SecurityInformation.from_default()
    secured_by(security)
    
    def __init__(self, *args):
        self._triggers = None
        self.security_manager = None
        # Serializes persistent-store creation/migration in start().
        self._pdo_lock = Lock()
        super(TriggersConfigurator, self).__init__(*args)
    def configure(self, config):
        """Accept 'path' (URL prefix served) and 'container' (trigger
        manager node) configuration values."""
        self.setattr('path', config.get('path','/triggerconfig'))
        self.setattr('manager', config.get('container','/services/Trigger Manager'))
        self.secured = as_internal_node("/services").secured
        super(TriggersConfigurator, self).configure(config)
    def configuration(self):
        config = super(TriggersConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['manager'] = self.getattr('manager')
        return config
    def start(self):
        """Open the persistent store, migrate legacy data, and load triggers."""
        filename = '%s (%s)' % (self.name, 'triggers')
        self.manager = self.nodespace.as_node(self.manager)
        self._pdo_lock.acquire()
        try:
            if self._triggers is None:
                self._triggers = PersistentDictionary(
                    filename, encode=None, decode=None)
            if not self._triggers:
                # One-time migration from the legacy PersistentDataObject.
                pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                if os.path.exists(pdodata.filename()):
                    msglog.log('broadway', msglog.types.INFO, 
                               "Migrating previous trigger data.")
                    pdodata.triggers = {}
                    pdodata.load()
                    self._triggers.update(pdodata.triggers)
                    pdodata.destroy()
                del pdodata
            self._loadtriggers()
            if self.secured:
                self.security_manager = self.as_node("/services/Security Manager")
            else:
                self.security_manager = None
        finally: 
            self._pdo_lock.release()
        return super(TriggersConfigurator, self).start()
    def stop(self):
        super(TriggersConfigurator, self).stop()
        self.manager = None
    def _loadtriggers(self, names=None):
        """Unmarshal and return stored triggers; all by default.

        Failures are logged and skipped, not raised.
        """
        triggers = []
        if names is None:
            names = self._triggers.keys()
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        for name in names:
            dump = self._triggers[name]
            try:
                trigger = unmarshal(dump)
            except:
                msglog.log("broadway", msglog.types.WARN, 
                           "Unable to load trigger: %s" % name)
                msglog.exception(prefix="handled")
            else:
                triggers.append(trigger)
        return triggers
    def _storetriggers(self, triggers=None):
        """Marshal and persist triggers; the manager's full set by default.

        Failures are logged and skipped, not raised.
        """
        if triggers is None:
            triggers = self.manager.get_triggers()
        elif not isinstance(triggers, (list, set, tuple)):
            triggers = [triggers]
        for trigger in triggers:
            try:
                dump = marshal(trigger)
            except:
                msglog.log("broadway", msglog.types.WARN, 
                           "Unable to marshal trigger: %s" % trigger.name)
                msglog.exception(prefix="handled")
            else:
                self._triggers[trigger.name] = dump
        return triggers
    def _poptriggers(self, names=None):
        """Drop stored trigger data; by default all entries for
        triggers the manager no longer knows.  Returns names removed."""
        if names is None:
            existing = set(self.manager.get_trigger_names())
            stored = self._triggers.keys()
            names = set(stored) - set(existing)
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        removed = []
        for name in names:
            try:
                self._triggers.pop(name)
            except:
                msglog.log("broadway", msglog.types.WARN, 
                           "Unable to remove trigger data: %s" % name)
                msglog.exception(prefix="handled")
            else:
                removed.append(name)
        return removed
    def match(self, path):
        """Return True when this configurator serves 'path'."""
        return path.startswith(self.path)
    security.protect('create_trigger', 'Configure')
    security.protect('create_node', 'Configure')
    def create_trigger(self, name, config=()):
        """Create and persist a trigger named 'name'.

        config['type'] selects the trigger class (default
        ComparisonTrigger); raises ValueError for unknown types.
        Returns the created trigger's name.
        """
        config = dict(config)
        if "type" in config:
            # Local renamed from 'type' to avoid shadowing the builtin.
            trigger_type = config.pop("type")
        else:
            trigger_type = "ComparisonTrigger"
        if isinstance(trigger_type, str):
            if trigger_type.endswith("ComparisonTrigger"):
                trigger_type = ComparisonTrigger
            elif trigger_type.endswith("BoundTrigger"):
                trigger_type = BoundTrigger
            else:
                # Fixed typo in message ("Uknown" -> "Unknown").
                raise ValueError("Unknown type: %r" % trigger_type)
        config.setdefault("name", name)
        config.setdefault("parent", self.manager)
        trigger = self._create_trigger(trigger_type, config)
        self._storetriggers([trigger])
        return trigger.name
    create_node = create_trigger
    
    security.protect('remove_trigger', 'Configure')
    security.protect('remove_node', 'Configure')
    def remove_trigger(self, name):
        """Remove trigger 'name' and its persisted data."""
        self._remove_trigger(name)
        self._poptriggers([name])
        return name
    remove_node = remove_trigger
    
    security.protect('configure_trigger', 'Configure')
    security.protect('configure_node', 'Configure')
    def configure_trigger(self, name=None, config=()):
        """Reconfigure trigger 'name' (or config['name']).

        Stop/start failures around the reconfigure are logged and
        ignored; raises TypeError when no name is available.
        """
        config = dict(config)
        if name is None:
            if "name" in config:
                name = config["name"]
            else:
                raise TypeError("configure_trigger() requires"
                                " name or configuration with name")
        trigger = self.manager.get_trigger(name)
        try: 
            trigger.stop()
        except Exception:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on stop.')
            msglog.exception(prefix = 'Handled')
        trigger.configure(config)
        try: 
            trigger.start()
        except Exception:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on start.')
            msglog.exception(prefix = 'Handled')
class TriggersConfigurator(CompositeNode):
    """Service node that persists trigger definitions and exposes
    secured create/configure/remove operations for them.

    NOTE: the class body interleaves secured_by()/security.protect()
    calls with method definitions; their relative order is part of the
    security wiring and must be preserved.
    """
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        self._triggers = None
        self.security_manager = None
        # Serializes persistent-store creation/migration in start().
        self._pdo_lock = Lock()
        super(TriggersConfigurator, self).__init__(*args)

    def configure(self, config):
        """Accept 'path' (URL prefix served) and 'container' (trigger
        manager node) configuration values."""
        self.setattr('path', config.get('path', '/triggerconfig'))
        self.setattr('manager',
                     config.get('container', '/services/Trigger Manager'))
        self.secured = as_internal_node("/services").secured
        super(TriggersConfigurator, self).configure(config)

    def configuration(self):
        config = super(TriggersConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['manager'] = self.getattr('manager')
        return config

    def start(self):
        """Open the persistent trigger store, migrate legacy
        PersistentDataObject data once, and load stored triggers."""
        filename = '%s (%s)' % (self.name, 'triggers')
        self.manager = self.nodespace.as_node(self.manager)
        self._pdo_lock.acquire()
        try:
            if self._triggers is None:
                self._triggers = PersistentDictionary(filename,
                                                      encode=None,
                                                      decode=None)
            if not self._triggers:
                # One-time migration from the legacy data object;
                # destroyed after a successful copy.
                pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                if os.path.exists(pdodata.filename()):
                    msglog.log('broadway', msglog.types.INFO,
                               "Migrating previous trigger data.")
                    pdodata.triggers = {}
                    pdodata.load()
                    self._triggers.update(pdodata.triggers)
                    pdodata.destroy()
                del (pdodata)
            self._loadtriggers()
            if self.secured:
                self.security_manager = self.as_node(
                    "/services/Security Manager")
            else:
                self.security_manager = None
        finally:
            self._pdo_lock.release()
        return super(TriggersConfigurator, self).start()

    def stop(self):
        super(TriggersConfigurator, self).stop()
        self.manager = None

    def _loadtriggers(self, names=None):
        """Unmarshal and return stored triggers (all by default).
        Failures are logged and skipped rather than raised."""
        triggers = []
        if names is None:
            names = self._triggers.keys()
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        for name in names:
            dump = self._triggers[name]
            try:
                trigger = unmarshal(dump)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to load trigger: %s" % name)
                msglog.exception(prefix="handled")
            else:
                triggers.append(trigger)
        return triggers

    def _storetriggers(self, triggers=None):
        """Marshal and persist triggers (the manager's full set by
        default).  Failures are logged and skipped rather than raised."""
        if triggers is None:
            triggers = self.manager.get_triggers()
        elif not isinstance(triggers, (list, set, tuple)):
            triggers = [triggers]
        for trigger in triggers:
            try:
                dump = marshal(trigger)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to marshal trigger: %s" % trigger.name)
                msglog.exception(prefix="handled")
            else:
                self._triggers[trigger.name] = dump
        return triggers

    def _poptriggers(self, names=None):
        """Drop stored trigger data; by default all entries for
        triggers the manager no longer knows.  Returns names removed."""
        if names is None:
            existing = set(self.manager.get_trigger_names())
            stored = self._triggers.keys()
            names = set(stored) - set(existing)
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        removed = []
        for name in names:
            try:
                self._triggers.pop(name)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to remove trigger data: %s" % name)
                msglog.exception(prefix="handled")
            else:
                removed.append(name)
        return removed

    def match(self, path):
        """Return True when this configurator serves 'path'."""
        return path.startswith(self.path)

    security.protect('create_trigger', 'Configure')
    security.protect('create_node', 'Configure')

    def create_trigger(self, name, config=()):
        """Create and persist a trigger named 'name'; config['type']
        selects the class (default ComparisonTrigger)."""
        config = dict(config)
        if "type" in config:
            # NOTE(review): local 'type' shadows the builtin here.
            type = config.pop("type")
        else:
            type = "ComparisonTrigger"
        if isinstance(type, str):
            if type.endswith("ComparisonTrigger"):
                type = ComparisonTrigger
            elif type.endswith("BoundTrigger"):
                type = BoundTrigger
            else:
                # NOTE(review): "Uknown" is a typo for "Unknown".
                raise ValueError("Uknown type: %r" % type)
        config.setdefault("name", name)
        config.setdefault("parent", self.manager)
        trigger = self._create_trigger(type, config)
        self._storetriggers([trigger])
        return trigger.name

    create_node = create_trigger

    security.protect('remove_trigger', 'Configure')
    security.protect('remove_node', 'Configure')

    def remove_trigger(self, name):
        """Remove trigger 'name' and its persisted data."""
        self._remove_trigger(name)
        self._poptriggers([name])
        return name

    remove_node = remove_trigger

    security.protect('configure_trigger', 'Configure')
    security.protect('configure_node', 'Configure')

    def configure_trigger(self, name=None, config=()):
        """Reconfigure trigger 'name' (or config['name']); stop/start
        failures around the reconfigure are logged and ignored."""
        config = dict(config)
        if name is None:
            if config.has_key("name"):
                name = config["name"]
            else:
                raise TypeError("configure_trigger() requires"
                                " name or configuration with name")
        trigger = self.manager.get_trigger(name)
        try:
            trigger.stop()
        except Exception, error:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on stop.')
            msglog.exception(prefix='Handled')
        trigger.configure(config)
        try:
            trigger.start()
        except Exception, error:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on start.')
            msglog.exception(prefix='Handled')
Example #5
0
class PersistanceManager(object):
    """Persists scheduler state keyed by normalized schedule node path.

    Layout of each schedule's record:
    {'cfg': cfg, 'summary': summary, 'meta': meta,
     'properties': properties, 'fail_list': fail_list,
     'sync_state': sync_in_progress, 'override': override}
    """
    def __init__(self):
        self._persisted_data = PersistentDictionary('ScheduleData')
        # Messages with level <= debug are logged.
        self.debug = 1
        
    def message(self, message, mtype=msglog.types.INFO, level=1):
        """Log 'message' when the debug level permits."""
        if self.debug >= level:
            msglog.log('Scheduler', mtype, message)
        
    def get_scheds(self):
        """Return all persisted schedule paths ordered by sched_sort
        (parents before their children)."""
        scheds = self._persisted_data.keys()
        scheds.sort(sched_sort)
        return scheds
        
    def get_sched(self, nodepath):
        """Return the record dictionary for schedule 'nodepath'."""
        return self._persisted_data[normalize_nodepath(nodepath)]
    
    def put_sched(self, nodepath, cfg):
        """Create (if needed) the schedule record and store 'cfg'."""
        nodepath = normalize_nodepath(nodepath)
        if not self._persisted_data.has_key(nodepath):
            # create default configuration.
            pdata = {'cfg':{},
                     'summary':[[], [], [], 'exceptions'],
                     'meta':{},
                     'properties':[],
                     'fail_list':[],
                     'sync_state':False,
                     'override':False}
            self._persisted_data[nodepath] = pdata
        self.put_sched_cfg(nodepath, cfg)
        
    def remove_sched(self, nodepath):
        """Remove schedule 'nodepath' and every descendant record."""
        nodepath = normalize_nodepath(nodepath)
        if self._persisted_data.has_key(nodepath):
            for sched in self.get_scheds():
                if sched.startswith(nodepath):
                    del self._persisted_data[sched]
        else:
            msg = 'Error removing non-existent schedule %s from persistent data.' \
                % nodepath
            self.message(msg)
                         
    def move_sched(self, source, destination, cfg, is_rename=False):
        """Move (or rename) schedule 'source' and all of its children
        to 'destination', re-persisting configurations as it goes."""
        source = normalize_nodepath(source)
        destination = normalize_nodepath(destination)
        # Safe defaults for the child-path mapping.  Normally the
        # source root is visited first (get_scheds() yields parents
        # before children) and overwrites these; previously they were
        # unbound when a child was visited first, raising NameError.
        oldroot = source
        newroot = destination
        for sched in self.get_scheds():
            if not sched.startswith(source):
                continue
            data = self._persisted_data[sched]
            del self._persisted_data[sched]
            if sched == source:
                # rename
                if is_rename:
                    newsched = destination
                else:
                    newsched = sched.replace(source, destination) + source.split('/')[-2] + '/'
                oldroot = sched
                newroot = newsched
                self._persisted_data[newsched] = data 
                # prior to persisting, the schedule should have been moved
                # within the nodetree.  We grab and persist the latest configuration.
                # This put call will also ensure sync to disk to takes place.
                self.put_sched_cfg(newsched, cfg)
            else:
                newsched = normalize_nodepath(sched.replace(oldroot, newroot))
                self._persisted_data[newsched] = data 
                self.put_sched_cfg(newsched, serialize_node(as_node(newsched)))
                    
    def get_sched_cfg(self, nodepath):
        return self._get_entry('cfg', nodepath)
    
    def put_sched_cfg(self, nodepath, cfg):
        self._put_entry('cfg', nodepath, cfg)
        
    def get_sched_summary(self, nodepath):
        return self._get_entry('summary', nodepath)
    
    def put_sched_summary(self, nodepath, summary):
        self._put_entry('summary', nodepath, summary)
        
    def get_sched_props(self, nodepath):
        return self._get_entry('properties', nodepath)
    
    def put_sched_props(self, nodepath, properties):
        self._put_entry('properties', nodepath, properties)
                    
    def get_sched_meta(self, nodepath):
        return self._get_entry('meta', nodepath)
    
    def put_sched_meta(self, nodepath, meta):
        self._put_entry('meta', nodepath, meta)
                
    def get_fail_list(self, nodepath):
        return self._get_entry('fail_list', nodepath)
    
    def put_fail_list(self, nodepath, fail_list):
        self._put_entry('fail_list', nodepath, fail_list)
        
    def get_sync_state(self, nodepath):
        return self._get_entry('sync_state', nodepath)
    
    def put_sync_state(self, nodepath, sync_state):
        self._put_entry('sync_state', nodepath, sync_state)
        
    def get_override(self, nodepath):
        return self._get_entry('override', nodepath)
    
    def put_override(self, nodepath, override):
        self._put_entry('override', nodepath, override)
        
    def _get_entry(self, ptype, nodepath):
        # One entry of the schedule's record dictionary.
        return self.get_sched(normalize_nodepath(nodepath))[ptype]
        
    def _put_entry(self, ptype, nodepath, value):
        """Store one entry and notify the store so it syncs to disk."""
        nodepath = normalize_nodepath(nodepath)
        sched = self.get_sched(nodepath)
        # NOTE: assert is stripped under python -O; the schedule is
        # expected to exist (created via put_sched) before writes.
        assert sched, \
        'A schedule must exist before data can be stored against it.'
        sched[ptype] = value
        self._persisted_data.notify_changed(nodepath)        
            
    def singleton_unload_hook(self):
        # Intentionally a no-op; required by the singleton framework.
        pass
Example #6
0
class PersistanceManager(object):
    """Persists scheduler state keyed by normalized schedule node path.

    Layout of each schedule's record:
    {'cfg': cfg, 'summary': summary, 'meta': meta,
     'properties': properties, 'fail_list': fail_list,
     'sync_state': sync_in_progress, 'override': override}
    """
    def __init__(self):
        self._persisted_data = PersistentDictionary('ScheduleData')
        # Messages with level <= debug are logged.
        self.debug = 1

    def message(self, message, mtype=msglog.types.INFO, level=1):
        """Log 'message' when the debug level permits."""
        if self.debug >= level:
            msglog.log('Scheduler', mtype, message)

    def get_scheds(self):
        """Return all persisted schedule paths ordered by sched_sort."""
        scheds = self._persisted_data.keys()
        scheds.sort(sched_sort)
        return scheds

    def get_sched(self, nodepath):
        """Return the record dictionary for schedule 'nodepath'."""
        return self._persisted_data[normalize_nodepath(nodepath)]

    def put_sched(self, nodepath, cfg):
        """Create (if needed) the schedule record and store 'cfg'."""
        nodepath = normalize_nodepath(nodepath)
        if not self._persisted_data.has_key(nodepath):
            # create default configuration.
            pdata = {
                'cfg': {},
                'summary': [[], [], [], 'exceptions'],
                'meta': {},
                'properties': [],
                'fail_list': [],
                'sync_state': False,
                'override': False
            }
            self._persisted_data[nodepath] = pdata
        self.put_sched_cfg(nodepath, cfg)

    def remove_sched(self, nodepath):
        """Remove schedule 'nodepath' and every descendant record."""
        nodepath = normalize_nodepath(nodepath)
        if self._persisted_data.has_key(nodepath):
            for sched in self.get_scheds():
                if sched.startswith(nodepath):
                    del self._persisted_data[sched]
        else:
            msg = 'Error removing non-existent schedule %s from persistent data.' \
                % nodepath
            self.message(msg)

    def move_sched(self, source, destination, cfg, is_rename=False):
        """Move (or rename) schedule 'source' and its children to
        'destination', re-persisting configurations as it goes.

        NOTE(review): oldroot/newroot are bound only in the
        sched == source branch; this relies on get_scheds() yielding
        the source root before its children — a child visited first
        would raise NameError.  Confirm sched_sort guarantees that
        ordering.
        """
        source = normalize_nodepath(source)
        destination = normalize_nodepath(destination)
        for sched in self.get_scheds():
            if not sched.startswith(source):
                continue
            data = self._persisted_data[sched]
            del self._persisted_data[sched]
            if sched == source:
                # rename
                if is_rename:
                    newsched = destination
                else:
                    newsched = sched.replace(
                        source, destination) + source.split('/')[-2] + '/'
                oldroot = sched
                newroot = newsched
                self._persisted_data[newsched] = data
                # prior to persisting, the schedule should have been moved
                # within the nodetree.  We grab and persist the latest configuration.
                # This put call will also ensure sync to disk to takes place.
                self.put_sched_cfg(newsched, cfg)
            else:
                newsched = normalize_nodepath(sched.replace(
                    oldroot, newroot))  #+ sched_name + '/'
                self._persisted_data[newsched] = data
                self.put_sched_cfg(newsched, serialize_node(as_node(newsched)))

    def get_sched_cfg(self, nodepath):
        return self._get_entry('cfg', nodepath)

    def put_sched_cfg(self, nodepath, cfg):
        self._put_entry('cfg', nodepath, cfg)

    def get_sched_summary(self, nodepath):
        return self._get_entry('summary', nodepath)

    def put_sched_summary(self, nodepath, summary):
        self._put_entry('summary', nodepath, summary)

    def get_sched_props(self, nodepath):
        return self._get_entry('properties', nodepath)

    def put_sched_props(self, nodepath, properties):
        self._put_entry('properties', nodepath, properties)

    def get_sched_meta(self, nodepath):
        return self._get_entry('meta', nodepath)

    def put_sched_meta(self, nodepath, meta):
        self._put_entry('meta', nodepath, meta)

    def get_fail_list(self, nodepath):
        return self._get_entry('fail_list', nodepath)

    def put_fail_list(self, nodepath, fail_list):
        self._put_entry('fail_list', nodepath, fail_list)

    def get_sync_state(self, nodepath):
        return self._get_entry('sync_state', nodepath)

    def put_sync_state(self, nodepath, sync_state):
        self._put_entry('sync_state', nodepath, sync_state)

    def get_override(self, nodepath):
        return self._get_entry('override', nodepath)

    def put_override(self, nodepath, override):
        self._put_entry('override', nodepath, override)

    def _get_entry(self, ptype, nodepath):
        # One entry of the schedule's record dictionary.
        return self.get_sched(normalize_nodepath(nodepath))[ptype]

    def _put_entry(self, ptype, nodepath, value):
        """Store one entry and notify the store so it syncs to disk."""
        nodepath = normalize_nodepath(nodepath)
        sched = self.get_sched(nodepath)
        # NOTE: assert is stripped under python -O; the schedule is
        # expected to exist (created via put_sched) before writes.
        assert sched, \
        'A schedule must exist before data can be stored against it.'
        sched[ptype] = value
        self._persisted_data.notify_changed(nodepath)

    def singleton_unload_hook(self):
        # Intentionally a no-op; required by the singleton framework.
        pass
Example #7
0
class PersistanceManager(object):
    """Persists Global Setpoint (GSP) group data across restarts.

    Data layout, one record per group node path:
        {nodepath: {'node_config': {...},
                    'group_config': [...],
                    'entity_map': {...},
                    'node_factory': ''}}

    Writes are silently ignored while persistence is disabled (the initial
    state); call enable_persist() before storing data.
    """
    def __init__(self):
        self._persisted_data = PersistentDictionary('GSPData')
        self.debug = 1
        # Persistence is off until enable_persist() is called explicitly.
        self._persist_enabled = False

    def message(self, message, mtype=msglog.types.INFO, level=1):
        """Log *message* when the instance debug level is at least *level*."""
        if self.debug >= level:
            msglog.log('Global Setpoint Manager', mtype, message)

    def persist_enabled(self):
        """Return True when writes are currently being persisted."""
        return self._persist_enabled

    def enable_persist(self):
        """Allow subsequent put_* calls to modify persisted data."""
        self._persist_enabled = True

    def disable_persist(self):
        """Make subsequent put_* calls no-ops."""
        self._persist_enabled = False

    def get_gsp_groups(self):
        """Return all persisted group paths, shallowest paths first."""
        groups = self._persisted_data.keys()
        # Sort by path depth so parents precede their children.  Equivalent
        # to the old cmp-based sort but portable (cmp() is Python-2-only).
        groups.sort(key=lambda path: path.count('/'))
        return groups

    def get_gsp_group(self, nodepath):
        """Return the full persisted record for the group at *nodepath*."""
        return self._persisted_data[normalize_nodepath(nodepath)]

    def put_gsp_group(self, nodepath, nodedata):
        """Create (if needed) the record for *nodepath* and merge *nodedata* in."""
        if not self.persist_enabled():
            return
        nodepath = normalize_nodepath(nodepath)
        if not self._persisted_data.has_key(nodepath):
            # create default configuration.
            data = {
                'node_config': {},
                'group_config': [],
                'entity_map': {},
                'node_factory': ''
            }
            self._persisted_data[nodepath] = data
        self.put_gsp_group_data(nodepath, nodedata)

    def remove_gsp_group(self, nodepath):
        """Delete the persisted record for *nodepath*, if present."""
        nodepath = normalize_nodepath(nodepath)
        if self._persisted_data.has_key(nodepath):
            del self._persisted_data[nodepath]

    def get_gsp_group_data(self, nodepath):
        """Return the full persisted record for *nodepath* (no existence check)."""
        nodepath = normalize_nodepath(nodepath)
        return self._persisted_data[nodepath]

    def put_gsp_group_data(self, nodepath, nodedata):
        """Merge the non-None entries of *nodedata* into the existing record."""
        nodepath = normalize_nodepath(nodepath)
        for data_key in self._persisted_data[nodepath].keys():
            value = nodedata.get(data_key)
            if value is not None:
                self._put_entry(nodepath, data_key, value)

    def get_gsp_group_nconfig(self, nodepath):
        # node configuration data
        return self._get_entry(nodepath, 'node_config')

    def put_gsp_group_nconfig(self, nodepath, value):
        # node configuration data
        self._put_entry(nodepath, 'node_config', value)

    def get_gsp_group_gconfig(self, nodepath):
        # gsp group configuration data
        return self._get_entry(nodepath, 'group_config')

    def put_gsp_group_gconfig(self, nodepath, value):
        # gsp group configuration data
        self._put_entry(nodepath, 'group_config', value)

    # Backward-compatible alias: the original method name carried a
    # spelling mistake ('putt'); keep it working for existing callers.
    putt_gsp_group_gconfig = put_gsp_group_gconfig

    def get_gsp_group_entity_map(self, nodepath):
        return self._get_entry(nodepath, 'entity_map')

    def put_gsp_group_entity_map(self, nodepath, value):
        self._put_entry(nodepath, 'entity_map', value)

    def _get_entry(self, nodepath, data_type):
        """Look up *data_type* in the record for *nodepath*."""
        return self.get_gsp_group(normalize_nodepath(nodepath))[data_type]

    def _put_entry(self, nodepath, data_type, value):
        """Store *value* under *data_type* in the record for *nodepath*.

        No-op while persistence is disabled.  The group record must
        already exist; a change notification is pushed after the write.
        """
        if not self.persist_enabled():
            return
        nodepath = normalize_nodepath(nodepath)
        group = self.get_gsp_group(nodepath)
        assert group, \
        'A group must exist before data can be stored against it.'
        group[data_type] = value
        self._persisted_data.notify_changed(nodepath)

    def singleton_unload_hook(self):
        """Called when the singleton service is unloaded; nothing to release."""
        pass
Exemple #8
0
class EquipmentMonitor(CompositeNode):
    """Service node that pushes monitored equipment data to remote targets.

    Requires exactly one IFormatter child and one ITransporter child.
    Subscriptions are kept in a PersistentDictionary (sid -> subscription)
    and serviced by a pool of WorkThread workers fed from work_queue.
    """
    implements(IEquipmentMonitor)
    def __init__(self, *args):
        self.test_machines = []
        self.synclock = RLock()  # serializes start/stop/cleanup transitions
        self.threadcount = 1  # worker-thread count, settable via configure()
        self.formatter = None  # IFormatter child, discovered in start()
        self.transporter = None  # ITransporter child, discovered in start()
        self.smservice = None  # resolved Subscription Manager node
        self.subscriptions = None  # PersistentDictionary: sid -> subscription
        self.running = Flag()  # set while the service is running
        self.work_threads = []
        self.work_queue = Queue()
        self.scheduling_lock = Lock()  # guards execution_groups
        self.execution_groups = Dictionary()  # timestamp -> SubscriptionGroup
        self.smnodeurl = '/services/Subscription Manager'
        super(EquipmentMonitor, self).__init__(*args)
    def configure(self, config):
        # Both keys are optional; the current values serve as defaults.
        self.smnodeurl = config.get('subscription_manager', self.smnodeurl)
        self.threadcount = int(config.get('threadcount', self.threadcount))
        super(EquipmentMonitor, self).configure(config)
    def configuration(self):
        config = super(EquipmentMonitor, self).configuration()
        config['subscription_manager'] = self.smnodeurl
        config['threadcount'] = str(self.threadcount)
        return config
    def start(self):
        """Locate child services, restore persisted subscriptions, start workers.

        Raises TypeError if already running or if the formatter/transporter
        children are missing or duplicated.  On failure, partially acquired
        resources are released before the exception propagates.
        """
        if self.is_running():
            raise TypeError("Equipment Monitor already running.")
        if TESTING and not self.test_machines:
            self.test_machines = setup_machines()
            machinecount = len(self.test_machines)
            self.debugout("Setup %d test machines" % machinecount)
        self.synclock.acquire()
        try:
            self.running.set()
            # Close any dictionary left open by a previous run.
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.formatter = None
            self.transporter = None
            children = self.children_nodes()
            # Exactly one formatter and one transporter child are allowed.
            for childnode in children:
                if IFormatter.providedBy(childnode):
                    if self.formatter is not None:
                        raise TypeError("Already has formatter child.")
                    self.formatter = childnode
                if ITransporter.providedBy(childnode):
                    if self.transporter is not None:
                        raise TypeError("Already has transporter child.")
                    self.transporter = childnode
            if not self.formatter:
                raise TypeError("Must have one formatter child node.")
            if not self.transporter:
                raise TypeError("Must have one transporter child node.")
            self.smservice = as_node(self.smnodeurl)
            self.subscriptions = PersistentDictionary(
                self.name, encode=self.serialize_subscription, 
                decode=self.unserialize_subscription)
            # One-time migration from the legacy PersistentDataObject store.
            pdodata = PersistentDataObject(self)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.WARN, 
                           "Equipment Monitor upgrading persistence.")
                migrate = frompdo(pdodata)
                self.subscriptions.update(migrate)
                message = "Equipment Monitor merged %d subscriptions."
                message = message % len(migrate)
                msglog.log('broadway', msglog.types.INFO, message)
                pdodata.destroy()
                msglog.log('broadway', msglog.types.WARN, 
                           "Equipment Monitor destroyed old persistence.")
                msglog.log('broadway', msglog.types.INFO, 
                           "Equipment Monitor persistence upgrade complete.")
            del(pdodata)
            message = 'Equipment Monitor startup: %s %s'
            for subscription in self.subscriptions.values():
                try:
                    subscription.setup_subscription()
                except:
                    msglog.exception(prefix="handled")
                else:
                    self.debugout(message % ('setup', subscription))
            # Stagger subscription start-up: each batch of roughly 30
            # subscriptions is assigned the next skip count.
            skipcounts = []
            for i in range(0, 1 + len(self.subscriptions) / 30):
                skipcounts.extend([i + 1] * 30)
            self.setup_work_threads()
            for subscription in self.subscriptions.values():
                try: 
                    subscription.start(skipcounts.pop())
                except: 
                    msglog.exception(prefix = "Handled")        
                else:
                    self.debugout(message % ('started', subscription))
        except:
            self.cleanup_resources()
            self.running.clear()
            raise
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).start()
    def stop(self):
        """Stop all subscriptions and worker threads, then release resources.

        If shutdown fails part-way the running flag is restored so the
        service still reports as running.
        """
        if not self.is_running():
            raise TypeError('Equipment Monitor not running.')
        self.synclock.acquire()
        try:
            self.running.clear()
            message = "Equipment Monitor shutdown: %s %s"
            for subscription in self.subscriptions.values():
                try: 
                    subscription.stop()
                except: 
                    msglog.exception(prefix='Handled')
                else:
                    self.debugout(message % ('stopped', subscription))
            self.teardown_work_threads()
        except:
            message = "Exception caused Eqiupment Monitor shutdown to fail."
            msglog.log('broadway', msglog.types.ERR, message)
            self.running.set()
            raise
        else:
            self.cleanup_resources()
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).stop()
    def get_subscription(self, sid, default = None):
        """Return the subscription stored under *sid*, or *default*."""
        return self.subscriptions.get(sid, default)
    def get_subscription_manager(self):
        return self.smservice
    def get_formatter(self):
        return self.formatter
    def get_transporter(self):
        return self.transporter
    def schedule_subscription(self, subscription, timestamp):
        """Add *subscription* to the execution group for *timestamp*.

        Groups are created lazily; the first subscription for a timestamp
        also schedules the group's execute() with the scheduler.
        """
        self.scheduling_lock.acquire()
        try:
            schedulegroup = self.execution_groups.get(timestamp)
            if schedulegroup is None:
                schedulegroup = SubscriptionGroup(self, timestamp)
                self.execution_groups[timestamp] = schedulegroup
                schedulegroup.scheduled = scheduler.at(
                    timestamp, schedulegroup.execute)
            schedentry = schedulegroup.add_subscription(subscription)
        finally:
            self.scheduling_lock.release()
        return schedentry
    def enqueue_work(self, callback, *args):
        # Work items are (callback, args) tuples consumed by WorkThreads.
        self.work_queue.put((callback, args))
    def dequeue_work(self, blocking = True):
        return self.work_queue.get(blocking)
    def is_running(self):
        return self.running.isSet()
    def assert_running(self):
        """Raise TypeError unless the service is running."""
        if not self.is_running():
            raise TypeError('Service must be running.')
        return
    def create_pushed(self, target, node_table, period=2, retries=10):
        """Create, persist and start a PushedSubscription; return its sid.

        *node_table* is either a node URL string (subscribe to its
        children) or a mapping of nodes.
        """
        self.assert_running()
        pushed = PushedSubscription(self, target, node_table, period, retries)
        sid = pushed.setup_subscription()
        self.subscriptions[sid] = pushed
        message = ['Equipment Monitor created subscription: ']
        message.append('Target URL: %s' % target)
        message.append('Period: %d sec' % period)
        message.append('Subscription ID: %s' % sid)
        if isinstance(node_table, str):
            message.append('Subscription for children of: %s' % node_table)
        else:
            firstthree = node_table.items()[0:3]
            message.append('Number of nodes: %d' % len(node_table))
            message.append('First three nodes: %s' % (firstthree,))
        self.debugout('\n    '.join(message), 2)
        pushed.start(1)
        return sid
    def cancel(self, sid):
        """Stop and remove subscription *sid*; return True if it was removed."""
        self.assert_running()
        if self.pause(sid):
            subscription = self.subscriptions.pop(sid)
            message = 'Equipment Monitor cancelled subscription: "%s"'
            self.debugout(message % sid, 2)
            return True
        return False
    def pause(self, sid, delay = None):
        """Stop subscription *sid* if it is running; return True on success.

        NOTE(review): the 'delay' argument is accepted but never used.
        """
        subscription = self.subscriptions.get(sid)
        if subscription and subscription.is_running():
            subscription.stop()
            return True
        else:
            return False
    def play(self, sid):
        """Restart a paused subscription; return True if it was restarted."""
        self.assert_running()
        subscription = self.subscriptions[sid]
        if not subscription.is_running():
            subscription.start()
            return True
        else:
            return False
    def reset(self, sid):
        """Re-run subscription setup for *sid*; return True if it exists."""
        subscription = self.subscriptions.get(sid)
        if subscription:
            subscription.reset_subscription()
            return True
        else:
            return False
    def list_subscriptions(self):
        return self.subscriptions.keys()
    def notify_group_executed(self, group):
        # Called back by a SubscriptionGroup once it has executed.
        self.scheduling_lock.acquire()
        try:
            self.execution_groups.pop(group.timestamp)
        finally:
            self.scheduling_lock.release()
    def cleanup_resources(self):
        """Cancel pending groups, drain the work queue and drop references."""
        self.synclock.acquire()
        try:
            for group in self.execution_groups:
                try: 
                    group.scheduled.cancel()
                except:
                    msglog.exception(prefix="handled")
            self.execution_groups.clear()
            # Drain queued work so workers cannot pick up stale jobs.
            try:
                while self.work_queue.get_nowait():
                    pass
            except Empty:
                pass
            if self.transporter:
                commonitor = self.transporter.monitor
                transmanager = self.transporter.transaction_manager
                try:
                    commonitor.shutdown_channels()
                except:
                    msglog.exception(prefix="handled")
                transmanager.controllers.clear()
            if self.subscriptions and not self.subscriptions.closed():            
                self.subscriptions.close()
            self.subscriptions = None
            self.transporter = None
            self.formatter = None
        finally:
            self.synclock.release()
    def setup_work_threads(self):
        """Spawn self.threadcount daemon WorkThreads; return how many exist."""
        assert self.is_running()
        assert len(self.work_threads) == 0
        while len(self.work_threads) < self.threadcount:
            monitor = WorkThread(self.is_running, self.dequeue_work)
            monitor.setDaemon(True)
            monitor.start()
            self.work_threads.append(monitor)
        return len(self.work_threads)
    def teardown_work_threads(self):
        """Unblock and join all worker threads; return how many were joined."""
        assert not self.is_running()
        threadcount = len(self.work_threads)
        # One None sentinel per worker unblocks its queue.get().
        map(self.work_queue.put, [None] * threadcount)
        while self.work_threads:
            self.work_threads.pop().join()
        return threadcount
    def serialize_subscription(self, subscription):
        # Persisted form is the repr() of the subscription's dictionary.
        return repr(subscription.as_dictionary())
    def unserialize_subscription(self, data):
        # NOTE(review): eval() of persisted data -- acceptable only because
        # the persistent store is local and trusted; never feed it
        # externally supplied strings.
        return PushedSubscription.from_dictionary(eval(data))
    def debugout(self, dbmessage, dblevel = 1):
        """Emit *dbmessage* to the message log when DEBUG >= *dblevel*."""
        if dblevel <= DEBUG: 
            msglog.log('broadway', msglog.types.DB, dbmessage)
Exemple #9
0
class EquipmentMonitor(CompositeNode):
    """Service node that pushes monitored equipment data to remote targets.

    Requires exactly one IFormatter child and one ITransporter child.
    Subscriptions are kept in a PersistentDictionary (sid -> subscription)
    and serviced by a pool of WorkThread workers fed from work_queue.
    """
    implements(IEquipmentMonitor)

    def __init__(self, *args):
        self.test_machines = []
        self.synclock = RLock()  # serializes start/stop/cleanup transitions
        self.threadcount = 1  # worker-thread count, settable via configure()
        self.formatter = None  # IFormatter child, discovered in start()
        self.transporter = None  # ITransporter child, discovered in start()
        self.smservice = None  # resolved Subscription Manager node
        self.subscriptions = None  # PersistentDictionary: sid -> subscription
        self.running = Flag()  # set while the service is running
        self.work_threads = []
        self.work_queue = Queue()
        self.scheduling_lock = Lock()  # guards execution_groups
        self.execution_groups = Dictionary()  # timestamp -> SubscriptionGroup
        self.smnodeurl = '/services/Subscription Manager'
        super(EquipmentMonitor, self).__init__(*args)

    def configure(self, config):
        # Both keys are optional; the current values serve as defaults.
        self.smnodeurl = config.get('subscription_manager', self.smnodeurl)
        self.threadcount = int(config.get('threadcount', self.threadcount))
        super(EquipmentMonitor, self).configure(config)

    def configuration(self):
        config = super(EquipmentMonitor, self).configuration()
        config['subscription_manager'] = self.smnodeurl
        config['threadcount'] = str(self.threadcount)
        return config

    def start(self):
        """Locate child services, restore persisted subscriptions, start workers.

        Raises TypeError if already running or if the formatter/transporter
        children are missing or duplicated.  On failure, partially acquired
        resources are released before the exception propagates.
        """
        if self.is_running():
            raise TypeError("Equipment Monitor already running.")
        if TESTING and not self.test_machines:
            self.test_machines = setup_machines()
            machinecount = len(self.test_machines)
            self.debugout("Setup %d test machines" % machinecount)
        self.synclock.acquire()
        try:
            self.running.set()
            # Close any dictionary left open by a previous run.
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.formatter = None
            self.transporter = None
            children = self.children_nodes()
            # Exactly one formatter and one transporter child are allowed.
            for childnode in children:
                if IFormatter.providedBy(childnode):
                    if self.formatter is not None:
                        raise TypeError("Already has formatter child.")
                    self.formatter = childnode
                if ITransporter.providedBy(childnode):
                    if self.transporter is not None:
                        raise TypeError("Already has transporter child.")
                    self.transporter = childnode
            if not self.formatter:
                raise TypeError("Must have one formatter child node.")
            if not self.transporter:
                raise TypeError("Must have one transporter child node.")
            self.smservice = as_node(self.smnodeurl)
            self.subscriptions = PersistentDictionary(
                self.name,
                encode=self.serialize_subscription,
                decode=self.unserialize_subscription)
            # One-time migration from the legacy PersistentDataObject store.
            pdodata = PersistentDataObject(self)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor upgrading persistence.")
                migrate = frompdo(pdodata)
                self.subscriptions.update(migrate)
                message = "Equipment Monitor merged %d subscriptions."
                message = message % len(migrate)
                msglog.log('broadway', msglog.types.INFO, message)
                pdodata.destroy()
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor destroyed old persistence.")
                msglog.log('broadway', msglog.types.INFO,
                           "Equipment Monitor persistence upgrade complete.")
            del (pdodata)
            message = 'Equipment Monitor startup: %s %s'
            for subscription in self.subscriptions.values():
                try:
                    subscription.setup_subscription()
                except:
                    msglog.exception(prefix="handled")
                else:
                    self.debugout(message % ('setup', subscription))
            # Stagger subscription start-up: each batch of roughly 30
            # subscriptions is assigned the next skip count.  Floor
            # division ('//') keeps the arithmetic integral; it is
            # identical to '/' under Python 2 for these operands.
            skipcounts = []
            for i in range(0, 1 + len(self.subscriptions) // 30):
                skipcounts.extend([i + 1] * 30)
            self.setup_work_threads()
            for subscription in self.subscriptions.values():
                try:
                    subscription.start(skipcounts.pop())
                except:
                    msglog.exception(prefix="Handled")
                else:
                    self.debugout(message % ('started', subscription))
        except:
            self.cleanup_resources()
            self.running.clear()
            raise
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).start()

    def stop(self):
        """Stop all subscriptions and worker threads, then release resources.

        If shutdown fails part-way the running flag is restored so the
        service still reports as running.
        """
        if not self.is_running():
            raise TypeError('Equipment Monitor not running.')
        self.synclock.acquire()
        try:
            self.running.clear()
            message = "Equipment Monitor shutdown: %s %s"
            for subscription in self.subscriptions.values():
                try:
                    subscription.stop()
                except:
                    msglog.exception(prefix='Handled')
                else:
                    self.debugout(message % ('stopped', subscription))
            self.teardown_work_threads()
        except:
            # Fixed typo in the logged message ("Eqiupment").
            message = "Exception caused Equipment Monitor shutdown to fail."
            msglog.log('broadway', msglog.types.ERR, message)
            self.running.set()
            raise
        else:
            self.cleanup_resources()
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).stop()

    def get_subscription(self, sid, default=None):
        """Return the subscription stored under *sid*, or *default*."""
        return self.subscriptions.get(sid, default)

    def get_subscription_manager(self):
        return self.smservice

    def get_formatter(self):
        return self.formatter

    def get_transporter(self):
        return self.transporter

    def schedule_subscription(self, subscription, timestamp):
        """Add *subscription* to the execution group for *timestamp*.

        Groups are created lazily; the first subscription for a timestamp
        also schedules the group's execute() with the scheduler.
        """
        self.scheduling_lock.acquire()
        try:
            schedulegroup = self.execution_groups.get(timestamp)
            if schedulegroup is None:
                schedulegroup = SubscriptionGroup(self, timestamp)
                self.execution_groups[timestamp] = schedulegroup
                schedulegroup.scheduled = scheduler.at(timestamp,
                                                       schedulegroup.execute)
            schedentry = schedulegroup.add_subscription(subscription)
        finally:
            self.scheduling_lock.release()
        return schedentry

    def enqueue_work(self, callback, *args):
        # Work items are (callback, args) tuples consumed by WorkThreads.
        self.work_queue.put((callback, args))

    def dequeue_work(self, blocking=True):
        return self.work_queue.get(blocking)

    def is_running(self):
        return self.running.isSet()

    def assert_running(self):
        """Raise TypeError unless the service is running."""
        if not self.is_running():
            raise TypeError('Service must be running.')
        return

    def create_pushed(self, target, node_table, period=2, retries=10):
        """Create, persist and start a PushedSubscription; return its sid.

        *node_table* is either a node URL string (subscribe to its
        children) or a mapping of nodes.
        """
        self.assert_running()
        pushed = PushedSubscription(self, target, node_table, period, retries)
        sid = pushed.setup_subscription()
        self.subscriptions[sid] = pushed
        message = ['Equipment Monitor created subscription: ']
        message.append('Target URL: %s' % target)
        message.append('Period: %d sec' % period)
        message.append('Subscription ID: %s' % sid)
        if isinstance(node_table, str):
            message.append('Subscription for children of: %s' % node_table)
        else:
            firstthree = node_table.items()[0:3]
            message.append('Number of nodes: %d' % len(node_table))
            message.append('First three nodes: %s' % (firstthree, ))
        self.debugout('\n    '.join(message), 2)
        pushed.start(1)
        return sid

    def cancel(self, sid):
        """Stop and remove subscription *sid*; return True if it was removed."""
        self.assert_running()
        if self.pause(sid):
            # pop() both removes and discards; the value is not needed.
            self.subscriptions.pop(sid)
            message = 'Equipment Monitor cancelled subscription: "%s"'
            self.debugout(message % sid, 2)
            return True
        return False

    def pause(self, sid, delay=None):
        """Stop subscription *sid* if it is running; return True on success.

        NOTE(review): the 'delay' argument is accepted but never used; it
        is retained for interface compatibility.
        """
        subscription = self.subscriptions.get(sid)
        if subscription and subscription.is_running():
            subscription.stop()
            return True
        else:
            return False

    def play(self, sid):
        """Restart a paused subscription; return True if it was restarted."""
        self.assert_running()
        subscription = self.subscriptions[sid]
        if not subscription.is_running():
            subscription.start()
            return True
        else:
            return False

    def reset(self, sid):
        """Re-run subscription setup for *sid*; return True if it exists."""
        subscription = self.subscriptions.get(sid)
        if subscription:
            subscription.reset_subscription()
            return True
        else:
            return False

    def list_subscriptions(self):
        return self.subscriptions.keys()

    def notify_group_executed(self, group):
        # Called back by a SubscriptionGroup once it has executed.
        self.scheduling_lock.acquire()
        try:
            self.execution_groups.pop(group.timestamp)
        finally:
            self.scheduling_lock.release()

    def cleanup_resources(self):
        """Cancel pending groups, drain the work queue and drop references."""
        self.synclock.acquire()
        try:
            for group in self.execution_groups:
                try:
                    group.scheduled.cancel()
                except:
                    msglog.exception(prefix="handled")
            self.execution_groups.clear()
            # Drain queued work so workers cannot pick up stale jobs.
            try:
                while self.work_queue.get_nowait():
                    pass
            except Empty:
                pass
            if self.transporter:
                commonitor = self.transporter.monitor
                transmanager = self.transporter.transaction_manager
                try:
                    commonitor.shutdown_channels()
                except:
                    msglog.exception(prefix="handled")
                transmanager.controllers.clear()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.subscriptions = None
            self.transporter = None
            self.formatter = None
        finally:
            self.synclock.release()

    def setup_work_threads(self):
        """Spawn self.threadcount daemon WorkThreads; return how many exist."""
        assert self.is_running()
        assert len(self.work_threads) == 0
        while len(self.work_threads) < self.threadcount:
            monitor = WorkThread(self.is_running, self.dequeue_work)
            monitor.setDaemon(True)
            monitor.start()
            self.work_threads.append(monitor)
        return len(self.work_threads)

    def teardown_work_threads(self):
        """Unblock and join all worker threads; return how many were joined."""
        assert not self.is_running()
        threadcount = len(self.work_threads)
        # One None sentinel per worker unblocks its queue.get().  An
        # explicit loop replaces map(): identical under Python 2, and
        # map() would be lazily evaluated (a no-op) under Python 3.
        for _ in range(threadcount):
            self.work_queue.put(None)
        while self.work_threads:
            self.work_threads.pop().join()
        return threadcount

    def serialize_subscription(self, subscription):
        # Persisted form is the repr() of the subscription's dictionary.
        return repr(subscription.as_dictionary())

    def unserialize_subscription(self, data):
        # NOTE(review): eval() of persisted data -- acceptable only because
        # the persistent store is local and trusted; never feed it
        # externally supplied strings.
        return PushedSubscription.from_dictionary(eval(data))

    def debugout(self, dbmessage, dblevel=1):
        """Emit *dbmessage* to the message log when DEBUG >= *dblevel*."""
        if dblevel <= DEBUG:
            msglog.log('broadway', msglog.types.DB, dbmessage)
Exemple #10
0
class NodeConfigurator(CompositeNode):
    """Manages dynamically created nodes persisted across restarts.

    Each managed node is recorded in a PersistentDictionary keyed by node
    URL, with a (factory, configuration) tuple as the value; on startup
    every persisted node is recreated in path order.
    """
    def __init__(self, *args, **kw):
        self.nodes = None  # PersistentDictionary: nodeurl -> (factory, config)
        super(NodeConfigurator, self).__init__(*args, **kw)

    def start(self):
        """Recreate all persisted nodes, parents before children."""
        if self.nodes is None:
            dictname = "%s (%s)" % (type(self).__name__, self.name)
            self.nodes = PersistentDictionary(dictname)
            nodeurls = self.nodes.keys()
            # Path ordering guarantees parents are created before children.
            nodeurls.sort(pathcompare)
            for nodeurl in nodeurls:
                factory, configuration = self.nodes[nodeurl]
                self.create_node(factory, nodeurl, **configuration)
        super(NodeConfigurator, self).start()

    def get_managed_node(self, nodeurl):
        """Return the node at *nodeurl*, raising TypeError if unmanaged."""
        if not self.nodes.has_key(nodeurl):
            raise TypeError("cannot manipulate unmanaged node: %s" % nodeurl)
        return as_node(nodeurl)

    def node_children(self, nodeurl):
        """Return the child names of the managed node at *nodeurl*."""
        node = self.get_managed_node(nodeurl)
        return node.children_names()

    def node_configuration(self, nodeurl):
        """Return the current configuration of the managed node."""
        node = self.get_managed_node(nodeurl)
        return node.configuration()

    def start_node(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        node.start()

    def stop_node(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        node.stop()

    def node_attr(self, nodeurl, name, value=Undefined):
        """Get, or (when *value* is given) set and persist, attribute *name*."""
        node = self.get_managed_node(nodeurl)
        if value is not Undefined:
            setattr(node, name, value)
            self.updatepdo(nodeurl, node)
        return getattr(node, name)

    def configure_node(self, nodeurl, config):
        """Reconfigure a managed node, rolling back on failure.

        The node is stopped for the duration of the reconfiguration and
        restarted regardless of the outcome; the resulting configuration
        is returned.
        """
        node = self.get_managed_node(nodeurl)
        node.stop()
        try:
            node.configure(config)
        except:
            msglog.log("broadway", msglog.types.WARN,
                       "Error prevented reconfiguration of node: %s" % node)
            msglog.exception(prefix="handled")
            msglog.log("broadway", msglog.types.WARN,
                       "Rolling back configuration.")
            try:
                # The PDO stores (factory, configuration) tuples; roll
                # back with the configuration dictionary only.  Passing
                # the whole tuple (as before) handed configure() the
                # wrong type.
                factory, configuration = self.nodes[nodeurl]
                node.configure(configuration)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Configuration rollback failed.")
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO,
                           "Rollback of configuration succeeded.")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "Node reconfigured: %s" % node)
            self.updatepdo(nodeurl, node)
        finally:
            node.start()
        return node.configuration()

    def create_node(self, factory, nodeurl, **config):
        """Instantiate *factory* at *nodeurl*, persist it and start it.

        *factory* may be a callable or a dotted-path string.  Raises
        TypeError if a node already exists at *nodeurl*; on configuration
        failure the partially built node is pruned.
        """
        try:
            as_node(nodeurl)
        except KeyError:
            pass
        else:
            raise TypeError("Node exists: %s" % nodeurl)
        if isinstance(factory, str):
            # SECURITY: exec/eval of the factory path executes arbitrary
            # code; factory strings must come only from trusted, locally
            # persisted configuration.
            module, sep, name = factory.rpartition(".")
            if name:
                exec("import %s" % module)
            factory = eval(factory)
        parent, sep, name = nodeurl.rpartition("/")
        configuration = {"name": name, "parent": parent}
        configuration.update(config)
        node = factory()
        try:
            node.configure(configuration)
        except:
            msglog.log("broadway", msglog.types.WARN,
                       "Error prevented configuration of new node: %s" % node)
            msglog.exception(prefix="handled")
            try:
                node.prune()
            except:
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO,
                           "Node successfully pruned.")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "New node created: %s" % node)
            self.updatepdo(nodeurl, node)
            node.start()
        return node.configuration()

    def remove_node(self, nodeurl):
        """Prune the managed node and delete its persisted entry."""
        node = self.get_managed_node(nodeurl)
        node.prune()
        self.updatepdo(nodeurl, None)

    def updatepdo(self, nodeurl, node):
        """Persist (or remove, when *node* is falsy) the node's PDO entry.

        Returns the canonical node URL of the stored entry.
        """
        if self.nodes.has_key(nodeurl):
            self.nodes.pop(nodeurl)
        if node:
            node = as_node(node)
            nodeurl = as_node_url(node)
            datatype = type(node)
            factory = "%s.%s" % (datatype.__module__, datatype.__name__)
            # Store the factory path with one snapshot of the node's
            # configuration (the old code built the tuple twice).
            self.nodes[nodeurl] = (factory, node.configuration())
        return nodeurl