def start(self):
    """Start the trigger configurator.

    Opens (or creates) the PersistentDictionary holding trigger
    definitions, migrates data from the legacy PersistentDataObject
    store when the new dictionary is empty, resolves the security
    manager when running secured, then delegates to the superclass.
    """
    storage_name = '%s (%s)' % (self.name, 'triggers')
    self.manager = self.nodespace.as_node(self.manager)
    self._pdo_lock.acquire()
    try:
        if self._triggers is None:
            self._triggers = PersistentDictionary(
                storage_name, encode=None, decode=None)
        if not self._triggers:
            # New-style store is empty: pull forward anything the
            # old-style PersistentDataObject file may hold.
            pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.INFO,
                           "Migrating previous trigger data.")
                pdodata.triggers = {}
                pdodata.load()
                self._triggers.update(pdodata.triggers)
                # Remove the legacy file so migration runs only once.
                pdodata.destroy()
            del pdodata
        self._loadtriggers()
        if self.secured:
            self.security_manager = self.as_node("/services/Security Manager")
        else:
            self.security_manager = None
    finally:
        self._pdo_lock.release()
    return super(TriggersConfigurator, self).start()
def start(self):
    """Start the trigger configurator: open persistent trigger storage,
    migrate legacy PDO data if present, and resolve the security manager."""
    # Storage name, e.g. "NodeName (triggers)".
    filename = '%s (%s)' % (self.name, 'triggers')
    self.manager = self.nodespace.as_node(self.manager)
    # Serialize against concurrent persistence access.
    self._pdo_lock.acquire()
    try:
        if self._triggers is None:
            self._triggers = PersistentDictionary(filename, encode=None, decode=None)
        if not self._triggers:
            # Dictionary is empty: check for data saved by the
            # old-style PersistentDataObject and migrate it forward.
            pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.INFO, "Migrating previous trigger data.")
                pdodata.triggers = {}
                pdodata.load()
                self._triggers.update(pdodata.triggers)
                # Remove the legacy file so this migration is one-shot.
                pdodata.destroy()
            del (pdodata)
        self._loadtriggers()
        if self.secured:
            # Secured deployments route trigger access through the
            # Security Manager service.
            self.security_manager = self.as_node(
                "/services/Security Manager")
        else:
            self.security_manager = None
    finally:
        self._pdo_lock.release()
    return super(TriggersConfigurator, self).start()
def start(self):
    """Start the alarm configurator.

    Creates the persistent alarm/event dictionaries, migrates data from
    old-style PDO persistence when found, subscribes to StateEvents on
    the manager node, and marks the service running.
    """
    self.managernode = self.as_node(self.manager)
    self.synclock.acquire()
    try:
        alarm_store = '%s (%s)' % (self.name, 'alarms')
        event_store = '%s (%s)' % (self.name, 'events')
        self.alarms = PersistentDictionary(
            alarm_store, encode=self.encode, decode=self.decode)
        self.events = PersistentDictionary(
            event_store, encode=self.encode, decode=self.decode)
        # Migrate PDO data from old style persistence.
        pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
        if os.path.exists(pdodata.filename()):
            msglog.log('broadway', msglog.types.INFO,
                       "Migrating previous alarm and event data")
            pdodata.events = {}
            pdodata.alarms = {}
            pdodata.load()
            migrate(pdodata, self.decode)
            self.rebuildstorage()
            # Drop the legacy file; migration is a one-time step.
            pdodata.destroy()
        del pdodata
    finally:
        self.synclock.release()
    self.securitymanager = self.as_node('/services/Security Manager')
    # Listen for state changes published by the alarm manager.
    register = self.managernode.register_for_type
    self.sub = register(self.handle_event, StateEvent)
    self.running.set()
    super(AlarmConfigurator, self).start()
def test_upgrade(self):
    """Upgrading from persistent_0 storage preserves data and removes
    the old file.

    Fix: replaced the deprecated ``failIf``/``failUnless`` TestCase
    aliases (removed in Python 3.12) with ``assertFalse``/``assertEqual``;
    ``assertEqual`` also reports both values on failure.
    """
    from mpx.upgrade.persistent import persistent_0
    old = persistent_0.PersistentDataObject("upgrade_test")
    old.purpose = "testing"
    old.save()
    old_filename = old._persistent.filename
    # Drop the handle on the old persistence so it is fully released
    # before the new-style object takes over the data.
    del old.__dict__["_persistent"]
    del old
    new = PersistentDataObject("upgrade_test")
    # Constructing the new object should consume (delete) the old file.
    self.assertFalse(os.path.exists(old_filename),
                     "Upgrade failed to remove old version")
    new.purpose = None
    new.load()
    self.assertEqual(new.purpose, "testing",
                     "Upgrade failed to get old value")
    new.destroy()
    del new
def test_upgrade(self):
    """Verify persistent_0 data survives the upgrade and the old file
    is removed afterwards."""
    from mpx.upgrade.persistent import persistent_0
    legacy = persistent_0.PersistentDataObject('upgrade_test')
    legacy.purpose = 'testing'
    legacy.save()
    legacy_path = legacy._persistent.filename
    # Release the old persistence handle before upgrading.
    del legacy.__dict__['_persistent']
    del legacy
    upgraded = PersistentDataObject('upgrade_test')
    self.failIf(os.path.exists(legacy_path),
                'Upgrade failed to remove old version')
    upgraded.purpose = None
    upgraded.load()
    self.failUnless(upgraded.purpose == 'testing',
                    'Upgrade failed to get old value')
    upgraded.destroy()
    del upgraded
class CloudConfigurator(CompositeNode):
    """Configuration front-end for the Cloud Manager service.

    Manages peers and the portal of the cloud formation, optionally
    routing modifications through the Security Manager when the
    framework runs secured.  NOTE: uses Python 2 ``raise E, msg``
    syntax throughout.
    """
    def __init__(self, *args, **kw):
        # Defaults; configure() may override path/manager and secured.
        self.secured = True
        self.path = "/cloudconfig"
        self.manager = '/services/Cloud Manager'
        self.security_manager = '/services/Security Manager'
        super(CloudConfigurator, self).__init__(*args, **kw)
    def configure(self, config):
        """Apply configuration; 'secured' mirrors the /services setting."""
        self.secured = as_boolean(as_internal_node("/services").secured)
        self.setattr('path', config.get('path',self.path))
        self.setattr('manager', config.get('manager','/services/Cloud Manager'))
        super(CloudConfigurator, self).configure(config)
    def configuration(self):
        """Return current configuration as a dictionary."""
        config = super(CloudConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['manager'] = self.getattr('manager')
        config['secured'] = str(int(self.secured))
        return config
    def stop(self):
        """Stop the node, converting node references back to URLs."""
        if not isinstance(self.manager, str):
            # NOTE(review): self.sub is not assigned anywhere in this
            # class as shown here — presumably registered elsewhere;
            # confirm before relying on this unregister.
            self.manager.dispatcher.unregister(self.sub)
            self.manager = as_node_url(self.manager)
        if not isinstance(self.security_manager, str):
            self.security_manager = as_node_url(self.security_manager)
        return super(CloudConfigurator, self).stop()
    def get_manager(self):
        """Return the manager node, wrapped as a secured node when the
        service is secured (so access checks apply to modifications)."""
        manager = self.manager
        if self.secured:
            manager = self.security_manager.as_secured_node(manager)
        return manager
    def match(self, path):
        # True when a request path falls under this configurator.
        return path.startswith(self.path)
    def start(self):
        """Resolve node references and migrate legacy PDO formation data."""
        self.manager = self.nodespace.as_node(self.manager)
        self.security_manager = as_node(self.security_manager)
        self._pdo = PersistentDataObject(self)
        msg='The CloudConfigurator Persistent Object is in the file :%s' %str(self._pdo.filename())
        msglog.log('CloudConfigurator', msglog.types.INFO,msg)
        if os.path.exists(self._pdo.filename()):
            # Migration
            msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration in Progress')
            # Seed the attribute with the current formation as a default;
            # load() then overwrites it with the value stored on disk.
            self._pdo.formation = cPickle.dumps(IPickles(self.manager.formation))
            self._pdo.load()
            formation = IPickles(cPickle.loads(self._pdo.formation))()
            msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration for the Formation:%s' %str(formation))
            self.manager.update_formation(formation,None)
            # One-shot migration: remove the legacy persistent file.
            self._pdo.destroy()
            del(self._pdo)
            msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration is Complete')
        return super(CloudConfigurator, self).start()
    def get_node_names(self):
        """Return the formation with this peer first, preceded by the
        portal (or an empty string when no portal is set)."""
        formation = self.manager.get_formation()
        norm_formation=self.manager.nformation.normalize_formation(formation)
        ind=norm_formation.index(self.manager.peer)
        # move the peer to be at the head of the list
        p=formation.pop(ind)
        formation.insert(0,p)
        #insert manager at the very first place
        portal=self.manager.get_portal()
        if(portal == None):
            formation.insert(0,"")
        else:
            formation.insert(0,portal)
        return (formation)
    def validate(self,name):
        """Return 0 when name is a usable remote host/IP, 1 otherwise
        (invalid, or a loopback reference)."""
        name=name.strip()
        if ( not (valid_ip_address(name) or valid_hostname(name))):
            return(1)
        if(name == 'localhost' ):
            return(1)
        if(name == '127.0.0.1' ):
            return(1)
        return(0)
    def handle_request(self, request):
        # Intentionally a no-op in this configurator.
        pass
    #create_node: name - name of the peer/portal
    #config - type = config["type"] string will tell if this is a "Peer" or a "Portal"
    def create_node(self, name, config=()):
        """Add a peer or set the portal.

        Raises ValueError for invalid/unresolvable names, duplicates,
        or peer/portal role conflicts.  Returns the accepted name.
        """
        config = dict(config)
        type = config['type'].lower()
        manager = self.get_manager()
        # Next statements verify access to modifier permitted.
        # (Bare attribute access on a secured node triggers the
        # security check before any real modification below.)
        if type == "peer":
            manager.add_peer
        else:
            manager.set_portal
        config.setdefault("parent", self.manager)
        peer_or_portal = config.setdefault("name", name).strip()
        ret = self.validate(peer_or_portal)
        if(ret != 0 ):
            msg='Add Peer/Portal failed. %s is a invalid hostname/IP Address' %(peer_or_portal)
            raise ValueError(msg)
        if(valid_hostname(peer_or_portal)):
            # Hostnames must resolve to a valid IP before being accepted.
            tmp=get_ip_addr(peer_or_portal)
            if(not valid_ip_address(tmp) ):
                raise ValueError('Cannot resolve the hostname %s. Please try with a valid Hostname' %(peer_or_portal))
        if(type == 'peer'):
            peer=peer_or_portal
            if (self.manager.is_peer_in_formation(peer) == False):
                if(self.manager.is_host_the_portal(peer) == False):
                    msg='Adding %s as a Peer' %str(peer)
                    msglog.log('CloudConfigurator', msglog.types.INFO,msg)
                    # Use possibly secured reference for the add.
                    manager.add_peer(peer)
                else:
                    raise ValueError,'A Portal cannot be a Peer : "%s" is the Portal for the Cloud.' % peer
            else:
                raise ValueError,'Add peer did nothing: "%s" already in Cloud Formation.' % peer
        else:
            portal=peer_or_portal
            if(self.manager.is_host_the_portal(portal) == False):
                if (self.manager.is_peer_in_formation(portal) == False):
                    msg='Setting the Portal as :%s' %str(portal)
                    msglog.log('CloudConfigurator', msglog.types.INFO,msg)
                    # Use possibly secured reference for the modification.
                    manager.set_portal(portal)
                else:
                    raise ValueError,'%s is in the formation. It cannot be added as Portal ' % portal
            else:
                raise ValueError,'Set Portal did nothing: "%s" already the Portal' % portal
        return(peer_or_portal)
    #remove_node: First check if name is manager then check in peer list to delete
    def remove_node(self, name):
        """Remove name from the formation as a peer, or clear the portal
        when name is not in the formation.  Returns name."""
        manager = self.get_manager()
        formation=self.manager.get_formation()
        if( name in formation ):
            msg='Removing %s as a Peer' %str(name)
            msglog.log('CloudConfigurator', msglog.types.INFO,msg)
            manager.remove_peer(name)
        else:
            msg='Removing %s as a Portal' %str(name)
            msglog.log('CloudConfigurator', msglog.types.INFO,msg)
            manager.set_portal(None)
        return name
def destroy(self):
    """Clear stored column data and tear down the persistent object."""
    # Reset in-memory state, then persist the now-empty snapshot.
    self.column_data, self.seq = {}, 0
    self.save()
    # Remove the underlying persistent storage and refresh lookups.
    PersistentDataObject.destroy(self)
    self._build_lookup()
def start(self):
    """Start the Equipment Monitor service.

    Validates that exactly one formatter and one transporter child
    exist, opens subscription persistence (migrating legacy PDO data
    if present), sets up worker threads, and starts all persisted
    subscriptions before delegating to the superclass.

    Raises TypeError when already running or when the formatter /
    transporter children are missing or duplicated.
    """
    if self.is_running():
        raise TypeError("Equipment Monitor already running.")
    if TESTING and not self.test_machines:
        # Test mode: fabricate machines to monitor.
        self.test_machines = setup_machines()
        machinecount = len(self.test_machines)
        self.debugout("Setup %d test machines" % machinecount)
    self.synclock.acquire()
    try:
        self.running.set()
        if self.subscriptions and not self.subscriptions.closed():
            # Re-start: close the previous persistent dictionary first.
            self.subscriptions.close()
        self.formatter = None
        self.transporter = None
        children = self.children_nodes()
        for childnode in children:
            if IFormatter.providedBy(childnode):
                if self.formatter is not None:
                    raise TypeError("Already has formatter child.")
                self.formatter = childnode
            if ITransporter.providedBy(childnode):
                if self.transporter is not None:
                    raise TypeError("Already has transporter child.")
                self.transporter = childnode
        if not self.formatter:
            raise TypeError("Must have one formatter child node.")
        if not self.transporter:
            raise TypeError("Must have one transporter child node.")
        self.smservice = as_node(self.smnodeurl)
        self.subscriptions = PersistentDictionary(
            self.name, encode=self.serialize_subscription,
            decode=self.unserialize_subscription)
        # One-time upgrade from old PersistentDataObject persistence.
        pdodata = PersistentDataObject(self)
        if os.path.exists(pdodata.filename()):
            msglog.log('broadway', msglog.types.WARN,
                       "Equipment Monitor upgrading persistence.")
            migrate = frompdo(pdodata)
            self.subscriptions.update(migrate)
            message = "Equipment Monitor merged %d subscriptions."
            message = message % len(migrate)
            msglog.log('broadway', msglog.types.INFO, message)
            pdodata.destroy()
            msglog.log('broadway', msglog.types.WARN,
                       "Equipment Monitor destroyed old persistence.")
            msglog.log('broadway', msglog.types.INFO,
                       "Equipment Monitor persistence upgrade complete.")
        del(pdodata)
        message = 'Equipment Monitor startup: %s %s'
        for subscription in self.subscriptions.values():
            try:
                subscription.setup_subscription()
            except:
                # Best-effort: one bad subscription must not block startup.
                msglog.exception(prefix="handled")
            else:
                self.debugout(message % ('setup', subscription))
        # Build staggered skip-counts so subscriptions start in
        # batches of thirty rather than all at once.
        skipcounts = []
        for i in range(0, 1 + len(self.subscriptions) / 30):
            skipcounts.extend([i + 1] * 30)
        self.setup_work_threads()
        for subscription in self.subscriptions.values():
            try:
                subscription.start(skipcounts.pop())
            except:
                msglog.exception(prefix="Handled")
            else:
                self.debugout(message % ('started', subscription))
    except:
        # Any startup failure: release resources and clear running flag.
        self.cleanup_resources()
        self.running.clear()
        raise
    finally:
        self.synclock.release()
    super(EquipmentMonitor, self).start()
def start(self):
    """Start the Equipment Monitor service.

    Requires exactly one formatter and one transporter child, opens
    subscription persistence (upgrading legacy PDO storage when
    found), spins up worker threads, and starts every persisted
    subscription before delegating to the superclass start.
    """
    if self.is_running():
        raise TypeError("Equipment Monitor already running.")
    if TESTING and not self.test_machines:
        self.test_machines = setup_machines()
        self.debugout("Setup %d test machines" % len(self.test_machines))
    self.synclock.acquire()
    try:
        self.running.set()
        if self.subscriptions and not self.subscriptions.closed():
            # Restarting: release the previously-open dictionary first.
            self.subscriptions.close()
        # Locate the single formatter and single transporter child.
        self.formatter = None
        self.transporter = None
        for child in self.children_nodes():
            if IFormatter.providedBy(child):
                if self.formatter is not None:
                    raise TypeError("Already has formatter child.")
                self.formatter = child
            if ITransporter.providedBy(child):
                if self.transporter is not None:
                    raise TypeError("Already has transporter child.")
                self.transporter = child
        if not self.formatter:
            raise TypeError("Must have one formatter child node.")
        if not self.transporter:
            raise TypeError("Must have one transporter child node.")
        self.smservice = as_node(self.smnodeurl)
        self.subscriptions = PersistentDictionary(
            self.name,
            encode=self.serialize_subscription,
            decode=self.unserialize_subscription)
        # One-shot upgrade from old-style PersistentDataObject storage.
        pdodata = PersistentDataObject(self)
        if os.path.exists(pdodata.filename()):
            msglog.log('broadway', msglog.types.WARN,
                       "Equipment Monitor upgrading persistence.")
            migrate = frompdo(pdodata)
            self.subscriptions.update(migrate)
            message = "Equipment Monitor merged %d subscriptions."
            message = message % len(migrate)
            msglog.log('broadway', msglog.types.INFO, message)
            pdodata.destroy()
            msglog.log('broadway', msglog.types.WARN,
                       "Equipment Monitor destroyed old persistence.")
            msglog.log('broadway', msglog.types.INFO,
                       "Equipment Monitor persistence upgrade complete.")
        del pdodata
        message = 'Equipment Monitor startup: %s %s'
        for subscription in self.subscriptions.values():
            try:
                subscription.setup_subscription()
            except:
                # Best-effort: a bad subscription must not abort startup.
                msglog.exception(prefix="handled")
            else:
                self.debugout(message % ('setup', subscription))
        # Stagger subscription start-up in batches of thirty.
        skipcounts = []
        for batch in range(0, 1 + len(self.subscriptions) / 30):
            skipcounts.extend([batch + 1] * 30)
        self.setup_work_threads()
        for subscription in self.subscriptions.values():
            try:
                subscription.start(skipcounts.pop())
            except:
                msglog.exception(prefix="Handled")
            else:
                self.debugout(message % ('started', subscription))
    except:
        # On any failure, release resources and clear the running flag.
        self.cleanup_resources()
        self.running.clear()
        raise
    finally:
        self.synclock.release()
    super(EquipmentMonitor, self).start()