Example #1
 def start(self):
     filename = '%s (%s)' % (self.name, 'triggers')
     self.manager = self.nodespace.as_node(self.manager)
     self._pdo_lock.acquire()
     try:
         if self._triggers is None:
             self._triggers = PersistentDictionary(filename,
                                                   encode=None,
                                                   decode=None)
         if not self._triggers:
             pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
             if os.path.exists(pdodata.filename()):
                 msglog.log('broadway', msglog.types.INFO,
                            "Migrating previous trigger data.")
                 pdodata.triggers = {}
                 pdodata.load()
                 self._triggers.update(pdodata.triggers)
                 pdodata.destroy()
             del (pdodata)
         self._loadtriggers()
         if self.secured:
             self.security_manager = self.as_node(
                 "/services/Security Manager")
         else:
             self.security_manager = None
     finally:
         self._pdo_lock.release()
     return super(TriggersConfigurator, self).start()
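The pattern above recurs throughout these examples: a new PersistentDictionary is seeded from a legacy PersistentDataObject exactly once (only while the new store is still empty), the old object is destroyed, and the whole step runs under a lock. A minimal self-contained sketch of that one-time migration, using a JSON file and a plain dict as stand-ins for the framework classes (the name migrate_legacy_store and the file path are invented for illustration):

import json
import os
from threading import Lock

_migrate_lock = Lock()

def migrate_legacy_store(new_store, legacy_path):
    # Seed new_store from the legacy file only if it is still empty,
    # mirroring the "if not self._triggers" guard in the example above.
    with _migrate_lock:
        if not new_store and os.path.exists(legacy_path):
            print("Migrating previous trigger data.")
            with open(legacy_path) as handle:
                new_store.update(json.load(handle))
            os.remove(legacy_path)  # plays the role of pdodata.destroy()
    return new_store

# Usage sketch: triggers = migrate_legacy_store({}, '/tmp/old_triggers.json')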
Example #2
 def start(self):
     self.managernode = self.as_node(self.manager)
     self.synclock.acquire()
     try:
         alarmsname = '%s (%s)' % (self.name, 'alarms')
         eventsname = '%s (%s)' % (self.name, 'events')
         self.alarms = PersistentDictionary(alarmsname,
                                            encode=self.encode,
                                            decode=self.decode)
         self.events = PersistentDictionary(eventsname,
                                            encode=self.encode,
                                            decode=self.decode)
         # Migrate PDO data from old style persistence.
         pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
         if os.path.exists(pdodata.filename()):
             msglog.log('broadway', msglog.types.INFO,
                        "Migrating previous alarm and event data")
             pdodata.events = {}
             pdodata.alarms = {}
             pdodata.load()
             migrate(pdodata, self.decode)
             self.rebuildstorage()
             pdodata.destroy()
         del(pdodata)
     finally:
         self.synclock.release()
     self.securitymanager = self.as_node('/services/Security Manager')
     
     register = self.managernode.register_for_type
     self.sub = register(self.handle_event, StateEvent)
     self.running.set()
     super(AlarmConfigurator, self).start()
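Unlike the trigger example, the alarm and event dictionaries are created with the configurator's own encode and decode callables, so values are serialized on the way in and reconstructed on the way out; the excerpt does not show those methods. A rough stand-in for that hook pair, using json and an invented EncodedDict class purely to illustrate the round trip:

import json

class EncodedDict(dict):
    # Toy stand-in for a PersistentDictionary with encode=/decode= hooks:
    # values are stored encoded and decoded again on access.
    def __init__(self, encode=json.dumps, decode=json.loads):
        super(EncodedDict, self).__init__()
        self.encode, self.decode = encode, decode

    def __setitem__(self, key, value):
        super(EncodedDict, self).__setitem__(key, self.encode(value))

    def __getitem__(self, key):
        return self.decode(super(EncodedDict, self).__getitem__(key))

alarms = EncodedDict()
alarms['high-temp'] = {'threshold': 85, 'source': 'AHU-1'}
assert alarms['high-temp']['threshold'] == 85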
Example #3
 def start(self):
     try:
         self._pdo_lock.acquire()
         try:
             if self.__running:
                 return
             self.__running = True
             self._trendconfig = PersistentDictionary(filename(self),
                                                      encode=None,
                                                      decode=None)
             if not self._trendconfig:
                 pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                 if os.path.exists(pdodata.filename()):
                     msglog.log('broadway', msglog.types.INFO,
                                "Migrating previous trend data")
                     pdodata.trends = {}
                     pdodata.load()
                     self._trendconfig.update(pdodata.trends)
                 del (pdodata)
         finally:
             self._pdo_lock.release()
         super(TrendManager, self).start()
         self.logger = node.as_internal_node(self.logger_url)
         if self.has_child('trends'):
             self.trends = self.get_child('trends')
         else:
             self.trends = CompositeNode()
             self.trends.configure({'parent': self, 'name': 'trends'})
             self.trends.start()
         corrupt_trends = []
         for trendname, trenddump in self._trendconfig.items():
             msg = "Loading trend: %s" % trendname
             msglog.log('trendmanager', msglog.types.INFO, msg)
             try:
                 trend = unmarshal(trenddump)
             except:
                 corrupt_trends.append(trendname)
                 msg = "Failed to load trend: %s" % trendname
                 msglog.log('trendmanager', msglog.types.ERR, msg)
                 msglog.exception(prefix='Handled')
         for trendname in corrupt_trends:
             try:
                 msg = "Deleting trend information: %s" % trendname
                 msglog.log('trendmanager', msglog.types.INFO, msg)
                 self._delete_trend_configuration(trendname)
                 if self.trends.has_child(trendname):
                     trend = self.trends.get_child(trendname)
                     trend.prune(force=True)
             except:
                 msglog.exception(prefix='Handled')
     except:
         self.__running = False
         raise
     return
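The trend-loading code deliberately works in two passes: it first collects the names whose dumps fail to unmarshal, then deletes those entries and prunes their nodes in a separate loop, so the configuration dictionary is never modified while it is being read. A condensed, framework-free sketch of the same two-pass cleanup (pickle.loads stands in for unmarshal here):

import pickle

def load_trends(trendconfig):
    # Pass 1: try to rebuild each trend, remembering the ones that fail.
    corrupt = []
    for name, dump in list(trendconfig.items()):
        try:
            pickle.loads(dump)
        except Exception:
            print("Failed to load trend: %s" % name)
            corrupt.append(name)
    # Pass 2: drop the corrupt entries outside the loop above.
    for name in corrupt:
        print("Deleting trend information: %s" % name)
        del trendconfig[name]
    return trendconfig

# Usage sketch:
# load_trends({'ok': pickle.dumps({'points': []}), 'bad': b'not a pickle'})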
Example #4
class CloudConfigurator(CompositeNode):
    def __init__(self, *args, **kw):
        self.secured = True
        self.path = "/cloudconfig"
        self.manager = '/services/Cloud Manager'
        self.security_manager = '/services/Security Manager'
        super(CloudConfigurator, self).__init__(*args, **kw)
    def configure(self, config):
        self.secured = as_boolean(as_internal_node("/services").secured)
        self.setattr('path', config.get('path',self.path))
        self.setattr('manager', config.get('manager','/services/Cloud Manager'))
        super(CloudConfigurator, self).configure(config)
    def configuration(self):
        config = super(CloudConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['manager'] = self.getattr('manager')
        config['secured'] = str(int(self.secured))
        return config
    def stop(self):
        if not isinstance(self.manager, str):
            self.manager.dispatcher.unregister(self.sub)
            self.manager = as_node_url(self.manager)
        if not isinstance(self.security_manager, str):
            self.security_manager = as_node_url(self.security_manager)
        return super(CloudConfigurator, self).stop()
    def get_manager(self):
        manager = self.manager
        if self.secured:
            manager = self.security_manager.as_secured_node(manager)
        return manager
    def match(self, path):
        return path.startswith(self.path)
    def start(self):
        self.manager = self.nodespace.as_node(self.manager)
        self.security_manager = as_node(self.security_manager)
        self._pdo = PersistentDataObject(self)
        msg='The CloudConfigurator Persistent Object is in the file :%s' %str(self._pdo.filename())
        msglog.log('CloudConfigurator', msglog.types.INFO,msg)
        if os.path.exists(self._pdo.filename()):
            # Migration 
            msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration in Progress')
            self._pdo.formation = cPickle.dumps(IPickles(self.manager.formation))
            self._pdo.load()
            formation = IPickles(cPickle.loads(self._pdo.formation))()
            msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration for the Formation:%s' %str(formation))
            self.manager.update_formation(formation,None)
            self._pdo.destroy()
            del(self._pdo)
            msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration is Complete')           
        return super(CloudConfigurator, self).start()

    def get_node_names(self):
        formation = self.manager.get_formation()
        norm_formation=self.manager.nformation.normalize_formation(formation)
        ind=norm_formation.index(self.manager.peer)

        # move the peer to be at the head of the list
        p=formation.pop(ind)
        formation.insert(0,p)

        # insert the portal at the very first place
        portal=self.manager.get_portal()
        if(portal == None):
            formation.insert(0,"")
        else:
            formation.insert(0,portal)
        return (formation)
 
    def validate(self,name):
        name=name.strip()
        if ( not (valid_ip_address(name) or valid_hostname(name))):
            return(1)
        if(name == 'localhost' ):
            return(1)
        if(name == '127.0.0.1' ):
            return(1)
        
        return(0)
    
    def handle_request(self, request):
        pass

        
    # create_node: name - name of the peer/portal
    # config: config["type"] tells whether this is a "Peer" or a "Portal"
    def create_node(self, name, config=()):
        config = dict(config)
        type = config['type'].lower()
        manager = self.get_manager()
        # The next statements verify that access to the modifier is permitted.
        if type == "peer":
            manager.add_peer
        else:
            manager.set_portal
        config.setdefault("parent", self.manager)
        peer_or_portal = config.setdefault("name", name).strip()
        ret = self.validate(peer_or_portal)
        if(ret != 0 ):
            msg='Add Peer/Portal failed. %s is an invalid hostname/IP Address' %(peer_or_portal)
            raise ValueError(msg)
        if(valid_hostname(peer_or_portal)):
            tmp=get_ip_addr(peer_or_portal)
            if(not valid_ip_address(tmp) ):
                raise ValueError('Cannot resolve the hostname %s. Please try with a valid Hostname' %(peer_or_portal))
        if(type == 'peer'):
            peer=peer_or_portal
            if (self.manager.is_peer_in_formation(peer) == False):
                if(self.manager.is_host_the_portal(peer) == False):
                    msg='Adding %s as a Peer' %str(peer)
                    msglog.log('CloudConfigurator', msglog.types.INFO,msg)
                    # Use possibly secured reference for the add.
                    manager.add_peer(peer)
                else:
                    raise ValueError,'A Portal cannot be a Peer : "%s" is the Portal for the Cloud.' % peer
            else:
                raise ValueError,'Add peer did nothing: "%s" already in Cloud Formation.' % peer
        else:
            portal=peer_or_portal
            if(self.manager.is_host_the_portal(portal) == False):
                if (self.manager.is_peer_in_formation(portal) == False):
                    msg='Setting the Portal as :%s' %str(portal)
                    msglog.log('CloudConfigurator', msglog.types.INFO,msg)
                    # Use possibly secured reference for the modification.
                    manager.set_portal(portal)
                else:
                    raise ValueError,'%s is in the formation. It cannot be added as Portal ' % portal
            else:
                raise ValueError,'Set Portal did nothing: "%s" already the Portal' % portal
        return(peer_or_portal)

    # remove_node: if name is in the peer formation remove it as a peer; otherwise clear the portal
    def remove_node(self, name):
        manager = self.get_manager()
        formation=self.manager.get_formation()
        if( name in formation ):
            msg='Removing %s as a Peer' %str(name)
            msglog.log('CloudConfigurator', msglog.types.INFO,msg)
            manager.remove_peer(name)
        else:
            msg='Removing %s as a Portal' %str(name)
            msglog.log('CloudConfigurator', msglog.types.INFO,msg)
            manager.set_portal(None)
        return name
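create_node() leans on framework helpers (valid_ip_address, valid_hostname, get_ip_addr) plus validate(), which follows the convention of returning 1 for a rejected name and 0 for an acceptable one, and it also refuses loopback addresses. A rough standalone approximation that folds the syntax check and the resolution step into a single lookup using only the socket module (the helper name check_peer_name is invented for this sketch and is not part of the framework):

import socket

def check_peer_name(name):
    # Returns 1 when the name must be rejected, 0 when it is usable,
    # matching the return convention of CloudConfigurator.validate().
    name = name.strip()
    if name in ('localhost', '127.0.0.1'):
        return 1
    try:
        socket.gethostbyname(name)  # resolves both IP addresses and hostnames
    except socket.error:
        return 1
    return 0

# Usage sketch: check_peer_name(' 127.0.0.1 ') -> 1; a resolvable host -> 0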
Example #5
class CloudManager(CompositeNode):
    implements(ICloudManager)
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self, *args):
        super(CloudManager, self).__init__(*args)
        self.dispatcher = Dispatcher('Cloud Manager:Dispatcher')
        register_utility(self, ICloudManager, 'Cloud Manager')
        self.peer = Event.LOCALORIGIN
        self.formation = []
        self._scheduled = None
        self.unreachable = {}
        self.subscription = None
        if((as_node('/services/network/https_server')).is_enabled()):
            self.secure_http = True
        else:
            self.secure_http = False
        self.channel_monitor = ChannelMonitor()
        self.channel_monitor.trigger = CallbackTrigger(self.channel_monitor)
    def stop(self):
        if self.subscription:
            self.remove_listener(self.subscription)
        if self.channel_monitor.is_running():
            self.channel_monitor.stop_monitor()
        self.subscription = None
        super(CloudManager, self).stop()

    def is_event_valid(self,cloudevent):
        portal = self.nformation.get_portal()
        topic=cloudevent.topics[0]
        if(topic == 'EventResend' ):
            if( (portal != None ) and utils.same_host(cloudevent.origin,portal) ):
                return(True)
        elif(topic == 'Alarm Manager' ):
            #if (self.is_peer_in_formation(cloudevent.origin) == True):
            return(True)
        elif(topic == 'CloudFormation' ):
            return(True)

        return(False)


    def handle_remote_event(self, data):
        cloudevent = IPickles(cPickle.loads(data))()
        self.message('Handling remote event from : %s topic=%s ' %(cloudevent.origin,cloudevent.topics))
        cloudevent.set_data(data)
        if(self.is_event_valid(cloudevent) == False ):
            self.message('Dropping the remote event from : %s topic=%s ' 
                           %(cloudevent.origin,cloudevent.topics),msglog.types.WARN)
            return
            
        self.dispatcher.dispatch(cloudevent, cloudevent.topics)
        if(not ('CloudFormation' in cloudevent.topics) ):
            return
        '''
        Don't propagate an event if we are the Portal
        '''
        if((cloudevent.portal != None )and (utils.same_host(self.peer,cloudevent.portal)) ):
            self.message('Not Propagating remote event, since I am getting it as a portal:')
            return
        
        
        self.propogate(cloudevent)

    def send_event_to_portal(self,event,topic,target):
        cloudevent = CloudEvent(
            self, self.peer, [target], self.nformation.get_portal(),topic, event)

        protocol = "https" if self.secure_http else "http"
        notifier = CloudNotifier(self.channel_monitor,target, protocol,'/cloud', self.debug)
        notifier.cloudevent = cloudevent
        if not cloudevent.has_data():
            cloudevent.set_data(cPickle.dumps(IPickles(cloudevent)))
        clouddata = cloudevent.get_data()
        notifier.notify(clouddata, self.handle_send_failure_portal,self.handle_send_success_portal)

    def handle_send_failure_portal(self, notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        self.message('Unable to send alarm events to portal=%s ' % (target_peer),msglog.types.WARN)

    def handle_send_success_portal(self,notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        self.message('Successfully sent alarm events to portal=%s ' % (target_peer))



    def handle_local_event(self, event, topics = []):
        cloudevent = CloudEvent(
            self, self.peer, self.target_formation, self.nformation.get_portal(),topics, event)
        self.propogate(cloudevent)
    
    def is_peer_in_formation(self,peer,formation=None):
        if not formation:
            formation = self.get_formation()
        peer_ip=utils.get_ip_addr(peer)
        return any(utils.same_host(peer_ip, ip) for ip in formation)
    
    def is_host_the_portal(self,host):
        portal = self.nformation.get_portal()
        if not portal:
            return False
        return utils.same_host(portal, host)
    
    def is_host_in_formation(self, host):
        if self.is_host_the_portal(host):
            return True
        if self.is_peer_in_formation(host):
            return True
        return False

    def handle_formation_update(self, cloudevent):
        '''
        Don't take any action (like updating the Cloud formation or Portal)
        if this event was received as a portal. The alarms are shown in the
        event manager by a different mechanism.
        '''
        if((cloudevent.portal != None ) and (utils.same_host(self.peer,cloudevent.portal))):
            self.message('Received the event as a Portal, so not going to take any action %s' % str(cloudevent))
            self.message('handle_formation_update doing nothing, no change.')
            return
        
        formation = cloudevent()
        if (self.is_peer_in_formation(self.peer,formation) == False):
            formation = [self.peer]
            self.message('Setting Cloud Formation to self.peer; no longer in Cloud.',msglog.types.INFO)
        
        self._setup_formation(formation,cloudevent.portal)

    
    def _setup_formation(self, formation,portal):
        scheduled, self._scheduled = self._scheduled, None
        if scheduled is not None:
            try: scheduled.cancel()
            except: pass
            else: self.message('Canceled pending dispatch of formation update.')
        self.nformation.set_portal(portal)
        self.nformation.set_formation(formation)
        self.target_formation = self.nformation.compute_targets()
        self.message('Resetting unreachables during Cloud setup.')
        self.reset_unreachables()
        (dispatch,delay)=self.nformation.compute_dispatch_info()
        if (dispatch):
            self._scheduled = scheduler.after(delay, self.dispatcher.dispatch, (FormationUpdated(self),))
            self.message('Scheduled dispatch in %s seconds.' % delay)
        else: self.message('Formation of one peer, no Updated event generated.')
        
        # Save the PDO, if the formation or portal has changed
        if((self._pdo.formation != formation) or (self._pdo.portal != portal) or (self._pdo.peer != self.peer)):
            self.message('New formation/portal found , hence pickling. New Formation is :%s portal is %s' %(str(formation),portal))
            self._pdo.formation=formation[:]
            self._pdo.portal=portal
            self._pdo.peer=self.peer
            tstart = time.time()
            self._pdo.save()
            tend = time.time()
            self.message('New formation pickled and saved in %s seconds.' % (tend - tstart))
        else:
            self.message('Formation/Portal has not changed. Not pickling it. ' )

    
    def update_formation(self, new_formation,portal):
        (no_of_excluded_peers,excludes)=self.nformation.compute_excludes(new_formation)
        if no_of_excluded_peers:
            self.message( 'Notifying removed participants: %s' % (excludes,))
            excludedevent = CloudEvent(self, self.peer, excludes,self.nformation.get_portal(),['CloudFormation'], new_formation)
            self.propogate(excludedevent)
        else: 
            self.message('All current Cloud members are in the new Cloud Formation.')
        self._setup_formation(new_formation,portal)
        self.handle_local_event(new_formation, ['CloudFormation'])

    def handle_propogation_failure(self, notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        # TODO: generate a comm-failure error to propagate as well.
        # Propagate the event to the Cloud Managers that target_peer would have notified.

        '''
        The target_peer can be the portal or a peer.
        If it is the portal, we do not put it in unreachables and we do not
        propagate the event; we only log that the portal is not reachable.
        '''
        portal=self.nformation.get_portal()
        if((portal != None ) and (utils.same_host(target_peer,portal))):
            msg='Portal %s is not reachable .' % portal
            self.message(msg)
            return
            
        
        scheduled = self.unreachable.get(target_peer)
        if scheduled is not None:
            scheduled.cancel()
            self.message('Host %s already listed unreachable, reset scheduled retry.' % target_peer)
        self.unreachable[target_peer] = scheduler.after(5 * 60, self._remove_from_unreachable, (target_peer,))
        self.message('Host %s added to list of unreachable peers.' % target_peer)
        self.propogate(cloudevent, target_peer)

    def _remove_from_unreachable(self, peer):
        if self.unreachable.has_key(peer):
            del(self.unreachable[peer])
            self.message('Removed "%s" from unreachable to retry.' % peer)
        else: self.message('Host "%s" not in unreachable, ignoring remove.' % peer)

    def reset_unreachables(self):
        message = 'Resetting unreachables:\n'
        unreachables = self.unreachable.items()
        self.unreachable.clear()
        for peer, entry in unreachables:
            entry.cancel()
            message += '\t- removed "%s" from unreachables;\n' % peer
        message += '\t%s peers cleared from unreachables.' % len(unreachables)
        self.message(message)
        return len(unreachables)

    def add_listener(self, callback, topic):
        return self.dispatcher.register_for_topic(callback, topic)

    def remove_listener(self, guid):
        return self.dispatcher.unregister(guid)

    def propogate(self, cloudevent, from_peer = None):
        '''
        No Changes - Just make sure ...
        '''
        if not isinstance(cloudevent, CloudEvent):
            raise TypeError('Argument must be instance of CloudEvent')
        if from_peer is None: from_peer = self.peer
        self.message('Propogating as %s:\n\t%s...' % (from_peer, str(cloudevent)))
        target_formation = cloudevent.targets

        notifiers = []
        targets = self.nformation.get_targets(target_formation, from_peer)
        for target in targets:
            if not target.strip():
                self.message('Not notifying "%s" because not valid, adding its targets.' % target)
                targets.extend(self.nformation.get_targets(target_formation, target))
            elif not self.unreachable.has_key(target):
                protocol = "https" if self.secure_http else "http"
                notifier = CloudNotifier(self.channel_monitor, target, 
                                         protocol, '/cloud', self.debug)
                notifier.cloudevent = cloudevent
                notifiers.append(notifier)
            else:
                self.message('Host "%s" unreachable, adding its targets.' % target)
                targets.extend(self.nformation.get_targets(target_formation, target))
        if not cloudevent.has_data():
            cloudevent.set_data(cPickle.dumps(IPickles(cloudevent)))
        clouddata = cloudevent.get_data()
        notified = []
        for notifier in notifiers:
            notifier.notify(clouddata, self.handle_propogation_failure)
            notified.append(notifier.peer)
        if(len(notified) > 0 ):
            self.message('Propogate notified: %s' % (notified,))
        return notified

    

    def message(self, message, mtype = msglog.types.DB):
        if mtype != msglog.types.DB or self.debug:
            message = 'CloudManager(%s) - %s' % (self.peer, message)
            msglog.log('broadway', mtype, message)

    security.protect('add_peer', 'Configure')
    def add_peer(self,peer):
        formation=self.nformation.get_formation()
        formation.append(peer)
        portal=self.nformation.get_portal()
        self.update_formation(formation,portal)
        return

    def get_formation(self):
        formation=self.nformation.get_formation()
        return(formation)
    
    def get_portal(self):
        portal=self.nformation.get_portal()
        return(portal)

    security.protect('set_portal', 'Configure')
    def set_portal(self,portal):
        formation=self.nformation.get_formation()
        self.update_formation(formation,portal)

    security.protect('remove_peer', 'Configure')
    def remove_peer(self,peer):
        formation = self.nformation.get_formation()
        formation.remove(peer)
        portal=self.nformation.get_portal()
        self.update_formation(formation,portal)
    
    def start(self):
        # Bad self IP Address 
        if(self.peer == '127.0.0.1' ):
            msg='Cloud facility will not function properly because of local IP address being 127.0.0.1'
            self.message(msg,msglog.types.WARN)
            return 
        if not self.channel_monitor.is_running():
            self.channel_monitor.start_monitor()
        self._pdo=PersistentDataObject(self)
        self.message('The Cloud Manager Persistent Object is in the file :%s' %str(self._pdo.filename()),msglog.types.INFO)
        migration=False
        if(os.path.exists(self._pdo.filename())):
            # Already Migrated
            self._pdo.formation=[self.peer]
            self._pdo.portal=None
            self._pdo.peer=self.peer
            self._pdo.load()
        else:
            # We save a 'default' formation and expect the Cloud Configurator to
            # update the _pdo.formation via the update_formation API.
            # The _setup_formation gets called internally from update_formation.
            self._pdo.portal=None
            self._pdo.formation=[self.peer]
            self._pdo.peer=self.peer
            self._pdo.save()
            self._pdo.load()
            migration=True
        
        #Bad formation/peer in the PDO
        if( not self._pdo.peer in self._pdo.formation ):
            #Bad formation/peer
            self.message('The Cloud Manager PDO in the file :%s is corrupted. Defaulting to safe configuration' %str(self._pdo.filename()),msglog.types.WARN)
            self._pdo.portal=None
            self._pdo.formation=[self.peer]
            self._pdo.peer=self.peer
            self._pdo.save()
            self._pdo.load()
        
        self.message('Hosts are :%s portal=%s self=%s' %(str(self._pdo.formation),self._pdo.portal,self._pdo.peer),msglog.types.INFO)
        self.nformation=NFormation(self._pdo.formation,self.peer)
        self.nformation.set_portal(self._pdo.portal)
        
        # IP Address Change Case
        if(not utils.same_host(self.peer,self._pdo.peer)):
            self.message('Self address change detected old=%s new=%s. Fixing the Cloud Formation accordingly' %(str(self._pdo.peer),self.peer),msglog.types.INFO)
            formation = self.nformation.get_formation()
            norm_form=self.nformation.normalize_formation(formation)
            # IP Address Swap
            self_index=norm_form.index(self._pdo.peer)
            formation.pop(self_index)
            formation.insert(0,self.peer)
            self.nformation.set_formation(formation)
               
        '''
        In the Case of Migration, the update_formation() API is called
        by the Cloud Configurator. In the already migrated case, we call the
        update_formation() with the PDO formation and Portal
        '''
        
        self.target_formation = self.nformation.compute_targets()
        
        if(migration == False):
            self.update_formation(self.nformation.get_formation(), self.nformation.get_portal())
        
        if self.subscription is None:
            self.subscription = self.add_listener(
                self.handle_formation_update, 'CloudFormation')

        # Send Cloud Event to all the Hosts for re-sending the alarm events 
        # over to the Portal again - if we are nbmm
        if(self.is_host_nbmm()):
            scheduler.after(10, self.request_for_resending_alarm_events)

        super(CloudManager, self).start()

    def is_host_nbmm(self):
        devices=as_node('/interfaces').children_names()
        if('relay1' in devices ):
            return(False)
        else:
            return(True)

    def get_hosts_list(self):
        hosts_list=[]
        all_hosts=as_node('/services/Host Manager').children_nodes()
        for h in all_hosts:
            hosts_list.append(h.host)
        return(hosts_list)

    def request_for_resending_alarm_events(self):
        hosts_list=self.get_hosts_list()
        for host in hosts_list:
            cloudevent = CloudEvent(self, self.peer, [host],None,['EventResend'],[host])
            cloudevent.set_data(cPickle.dumps(IPickles(cloudevent)))
            self.send_req_for_alarm_events(host,cloudevent)

    def handle_send_failure(self, notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        self.message('Unable to notify %s to send alarm events again ' % (target_peer),msglog.types.WARN)

    def handle_send_success(self,notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        self.message('Successfully notified %s to send alarm events again ' % (target_peer))

    def send_req_for_alarm_events(self,target,cloudevent):
        from_peer=self.peer
        protocol = "https" if self.secure_http else "http"
        notifier = CloudNotifier(self.channel_monitor,target, protocol,'/cloud', self.debug)
        notifier.cloudevent = cloudevent
        clouddata = cloudevent.get_data()
        notifier.notify(clouddata, self.handle_send_failure,self.handle_send_success)
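handle_propogation_failure() parks an unreachable peer for five minutes by scheduling its removal with scheduler.after, resetting the timer if the peer is already listed, and reset_unreachables() cancels every pending timer at once. A small framework-free sketch of that bookkeeping, using threading.Timer in place of the framework scheduler (the class name UnreachableSet is invented for this sketch):

from threading import Timer

class UnreachableSet(object):
    # Tracks peers that failed to respond and forgets them after a delay,
    # mirroring self.unreachable / _remove_from_unreachable above.
    def __init__(self, retry_delay=5 * 60):
        self.retry_delay = retry_delay
        self.entries = {}

    def add(self, peer):
        existing = self.entries.get(peer)
        if existing is not None:
            existing.cancel()  # peer already listed: reset the scheduled retry
        timer = Timer(self.retry_delay, self.remove, (peer,))
        timer.daemon = True
        timer.start()
        self.entries[peer] = timer

    def remove(self, peer):
        self.entries.pop(peer, None)

    def reset(self):
        # Counterpart of reset_unreachables(): cancel and clear everything.
        for timer in list(self.entries.values()):
            timer.cancel()
        count = len(self.entries)
        self.entries.clear()
        return count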
Example #6
class Control(CompositeNode):
    ##
    # This attribute is used in the introspective generation
    # of configuration data.
    __module__ = mpx.service.control.__name__

    def __init__(self):
        CompositeNode.__init__(self)
        self._status = 'initialized'
        self._stale_apps = []

    def configure(self, config):
        self._pdo = PersistentDataObject(self)
        # used to detect changes to xml files based on timestamp.
        # Tuple (modify time, pickle string)
        self._pdo.stats_dict = {}
        self._pdo.load()
        # write_priority can be set for the entire control service, though
        # it may be specialized at the individual application node level.
        set_attribute(self, 'write_priority', 9, config, int)
        CompositeNode.configure(self, config)

    def configuration(self):
        config = CompositeNode.configuration(self)
        self.pdo_file = self._pdo.filename()
        get_attribute(self, 'write_priority', config)
        get_attribute(self, 'pdo_file', config, str)
        return config

    def _save_pdo(self):
        # no locking needed since the load and save cannot overlap
        start_time = time.time()
        self._pdo.save()
        msglog.log(
            self.as_node_url(), msglog.types.INFO,
            'Control service configuration data saved in: %s seconds' %
            (str(time.time() - start_time), ))

    def start(self):
        self._status = 'starting'
        self.stats_dict = {}  #clear out stats dict to force reload of app
        self.application_change_detector(1)  #starting

    def _start(self):
        CompositeNode.start(self)  #get the children ready for a trip...
        #now that the children are started, go back through the list and finish up the "graphical compile"
        for n in self.children_nodes():
            if n.hasattr('map_output_connections'):
                n.map_output_connections()
        for n in self.children_nodes():
            if n.hasattr('map_reference_output_connections'):
                n.map_reference_output_connections()
        for n in self.children_nodes():
            if n.hasattr('resolve_inputs'):
                n.resolve_inputs()
        for n in self.children_nodes():
            if n.hasattr('prepare_run_list'):
                n.prepare_run_list()
        for n in self.children_nodes():
            if n.hasattr('trigger_run_list'):
                n.trigger_run_list()

    def prune_orphaned_schedules(self):
        # remove schedules under /services/time/local/TIM that have no app
        manager = as_node('/services/time/local')
        if manager.has_child('TIM'):
            try:
                sh = as_node('/services/time/local/TIM')
                name_header = 'RZSched_'
                # create list of RZSched_'s under the TIM node
                schedules = filter(
                    lambda k: k[:len(name_header)] == name_header,
                    sh.children_names())
                # compare appname after RZSched_, up to :, with our children names
                orphans = filter(
                    lambda k: k.split('_')[1].split(':')[0]
                    not in self.children_names(), schedules)
                for o in orphans:
                    try:
                        sh.get_child(o).prune()
                        msglog.log('Graphical Control:',
                                   'pruned orphaned schedule: ', o)
                    except:
                        msglog.exception()
                if len(orphans):
                    sh.save_schedule()
            except:
                msglog.exception()

    def check_and_load_application_files(self, starting=0):
        app_reloaded = starting  #return value to signal that the children need to be started
        save_pdo = 0  #flag to control saving config data to pdo
        # /var/mpx/config/services/control (usually)
        files = os.listdir(config_path)
        xml_filenames = []
        for f in files:
            # any xml file in this folder is assumed to be a control app
            if f.find('.xml') > 0 and len(f) == (f.find('.xml') + 4):
                xml_filenames.append(f)
                modify_time = os.stat(config_path + f)[8]
                stale_pdo = True
                no_stats_pdo = True
                if f in self._pdo.stats_dict:  #check for change since last time
                    no_stats_pdo = False
                    if self._pdo.stats_dict[f][0] == modify_time:
                        stale_pdo = False  #old news, no change detected
                #self.stats_dict[f]=modify_time
                # need to (re)load application
                if starting or no_stats_pdo or (stale_pdo and ALLOW_APP_RELOAD):
                    if app_reloaded == 0:  #only stop all application nodes for the first detected change
                        try:
                            self._status = 'Stopping %s' % (f, )
                            msglog.log(
                                self.as_node_url(), msglog.types.INFO,
                                'Stage 0:  Stop Application templates.')
                            for c in self.children_nodes():
                                if hasattr(c, '_stop_running_app'):
                                    c._stop_running_app()
                        except:
                            msglog.exception()
                    app_reloaded = 1  #no need to "stop" for any other app changes
                    self._status = 'Loading %s' % (f, )
                    try:
                        root = None
                        if not stale_pdo:  #so no change was detected, we are starting up the framework
                            try:  #to get the pickled config data rather than load the xml again
                                msglog.log(
                                    self.as_node_url(), msglog.types.INFO,
                                    'Stage 1:  XML unchanged.  Loading configuration data from PDO: %s'
                                    % (f, ))
                                root = cPickle.loads(
                                    self._pdo.stats_dict[f][1])
                            except:
                                msglog.exception()
                                msglog.log(
                                    self.as_node_url(), msglog.types.WARN,
                                    'Stage 1:  Unable to reload config data. Next, try XML file.'
                                )
                        if root is None:
                            msglog.log(
                                self.as_node_url(), msglog.types.INFO,
                                'Stage 1:  Parsing configuration xml file: %s'
                                % (f, ))
                            root = parse_xml(config_path + f)
                            self._pdo.stats_dict[f] = (modify_time,
                                                       cPickle.dumps(root))
                            save_pdo = 1
                            if f in self._stale_apps:
                                self._stale_apps.remove(f)
                        #now we have the root configuration.  Turn it into configured nodes
                        module = root.get_config().get('module', None)
                        if module == 'mpx.ion.rz.rzhost_node.RzhostNode':
                            load_rz_root(root, self)
                        elif module == 'mpx.service.control.graphical.ApplicationNode':
                            load_tim_root(root, self)
                        else:
                            raise EInvalidValue()
                    except Exception, e:
                        msglog.exception()
                        pass
                elif stale_pdo:
                    if not f in self._stale_apps:
                        msglog.log(
                            self.as_node_url(), msglog.types.INFO,
                            'Application %s has been modified, please restart the framework.'
                            % (f, ))
                        self._stale_apps.append(f)
                        try:
                            self.get_child(f.split('.')[0]).set_stale_flag()
                        except:
                            msglog.exception()

        # clear out any leftover pdo entries for deleted files
        for k in self._pdo.stats_dict.keys():
            try:
                if k not in xml_filenames:
                    save_pdo = 1  # force save of modified pdo
                    del self._pdo.stats_dict[k]
                    n = k.split('.xml')[0]
                    # prune any running app whose file was deleted
                    if self.has_child(n):
                        try:
                            self.get_child(n).prune()
                            self.prune_orphaned_schedules()
                        except:
                            msglog.exception()
            except:
                pass
        if save_pdo:
            thread_pool.LOW.queue_noresult(self._save_pdo)
        return app_reloaded
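check_and_load_application_files() decides whether an application XML needs to be re-parsed by comparing the file's modification time against the (modify time, pickled config) tuple cached in the PDO's stats_dict. The core of that decision, stripped of the node machinery (reparse_needed and its arguments are invented for this sketch; cache plays the role of stats_dict):

import os

def reparse_needed(path, cache, starting=False, allow_reload=True):
    # cache maps filename -> (modify_time, pickled_config), like stats_dict.
    name = os.path.basename(path)
    modify_time = os.stat(path).st_mtime
    cached = cache.get(name)
    no_stats = cached is None
    stale = no_stats or cached[0] != modify_time
    # Reload on framework start, for a file never seen before, or when a
    # change is detected and reloads are allowed (ALLOW_APP_RELOAD above).
    return starting or no_stats or (stale and allow_reload)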
Example #7
 def start(self):
     if self.is_running():
         raise TypeError("Equipment Monitor already running.")
     if TESTING and not self.test_machines:
         self.test_machines = setup_machines()
         machinecount = len(self.test_machines)
         self.debugout("Setup %d test machines" % machinecount)
     self.synclock.acquire()
     try:
         self.running.set()
         if self.subscriptions and not self.subscriptions.closed():
             self.subscriptions.close()
         self.formatter = None
         self.transporter = None
         children = self.children_nodes()
         for childnode in children:
             if IFormatter.providedBy(childnode):
                 if self.formatter is not None:
                     raise TypeError("Already has formatter child.")
                 self.formatter = childnode
             if ITransporter.providedBy(childnode):
                 if self.transporter is not None:
                     raise TypeError("Already has transporter child.")
                 self.transporter = childnode
         if not self.formatter:
             raise TypeError("Must have one formatter child node.")
         if not self.transporter:
             raise TypeError("Must have one transporter child node.")
         self.smservice = as_node(self.smnodeurl)
         self.subscriptions = PersistentDictionary(
             self.name, encode=self.serialize_subscription, 
             decode=self.unserialize_subscription)
         pdodata = PersistentDataObject(self)
         if os.path.exists(pdodata.filename()):
             msglog.log('broadway', msglog.types.WARN, 
                        "Equipment Monitor upgrading persistence.")
             migrate = frompdo(pdodata)
             self.subscriptions.update(migrate)
             message = "Equipment Monitor merged %d subscriptions."
             message = message % len(migrate)
             msglog.log('broadway', msglog.types.INFO, message)
             pdodata.destroy()
             msglog.log('broadway', msglog.types.WARN, 
                        "Equipment Monitor destroyed old persistence.")
             msglog.log('broadway', msglog.types.INFO, 
                        "Equipment Monitor persistence upgrade complete.")
         del(pdodata)
         message = 'Equipment Monitor startup: %s %s'
         for subscription in self.subscriptions.values():
             try:
                 subscription.setup_subscription()
             except:
                 msglog.exception(prefix="handled")
             else:
                 self.debugout(message % ('setup', subscription))
         skipcounts = []
         for i in range(0, 1 + len(self.subscriptions) / 30):
             skipcounts.extend([i + 1] * 30)
         self.setup_work_threads()
         for subscription in self.subscriptions.values():
             try: 
                 subscription.start(skipcounts.pop())
             except: 
                 msglog.exception(prefix = "Handled")        
             else:
                 self.debugout(message % ('started', subscription))
     except:
         self.cleanup_resources()
         self.running.clear()
         raise
     finally:
         self.synclock.release()
     super(EquipmentMonitor, self).start()
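The skipcounts list staggers subscription startup: with Python 2 integer division, 1 + len(subscriptions) / 30 batches of thirty identical values are produced, and each subscription pops one, so at most thirty subscriptions share any given skip count, presumably so their work is spread out rather than starting in lockstep. A small sketch of how that list is built (build_skipcounts is an invented name; // makes the integer division explicit):

def build_skipcounts(subscription_count, batch=30):
    # Reproduces the loop above: one skip value per batch of `batch` items,
    # e.g. 65 subscriptions -> [1]*30 + [2]*30 + [3]*30.
    skipcounts = []
    for i in range(0, 1 + subscription_count // batch):
        skipcounts.extend([i + 1] * batch)
    return skipcounts

# Each subscription then takes one value off the end, exactly like
# subscription.start(skipcounts.pop()) in the example above.
print(build_skipcounts(65)[:5])  # -> [1, 1, 1, 1, 1]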