Example #1
class Logger(ServiceNode):

    ##
    # @author Craig Warren
    # @param config
    # @return None
    def configure(self,config):
        ServiceNode.configure(self,config)
    ##
    # @author Craig Warren
    #   starts the logger service
    # @return None
    def start(self):
        ServiceNode.start(self)
        # this will correctly add the msglog as a child
        #  to the logger.
        if 'msglog' not in self.children_names():
            columns = mpx.lib.msglog.get_columns()
            log = Log()
            log.configure({'name':'msglog', 'parent':self})
            for c in columns:
                column = mpx.lib.factory('mpx.service.logger.column')
                config = c.configuration()
                config['parent'] = log
                column.configure(config)
        self._logs = PersistentDataObject(self)
        self._logs.names = []
        self._logs.load()
        for name in self._logs.names:
            if ((not mpx.lib.log.log_exists(name)) and 
                (name not in self.children_names())):
                log = mpx.lib.log.log(name)
                log.destroy()
                del(log)
        self._logs.names = []
        for child in self.children_nodes():
            if not isinstance(child, Alias):
                # Don't manage other managers' logs...
                self._logs.names.append(child.name)
        self._logs.save()

    ##
    # @author Craig Warren
    #   stops the logger service
    # @return None
    def stop(self):
        return ServiceNode.stop(self)

    ##
    # @author Craig Warren
    # @param log_name
    #   the name of the log to return
    # @return Log
    #   returns the log if it can't find the log it
    #   returns None
    def get_log(self,log_name):
        for child in self.children_nodes():
            if child.name == log_name:
                return child
        return None
Example #2
class LastAlarm(CompositeNode,EventConsumerMixin):
    def __init__(self):
        self._last_alarm = None
        self._started = 0
        CompositeNode.__init__(self)
        EventConsumerMixin.__init__(self,self._alarm_triggered)
    def configure(self, config):
        CompositeNode.configure(self, config)
    def configuration(self):
        config = CompositeNode.configuration(self)
        return config
    def start(self):
        self._pdo = PersistentDataObject(self)
        self._pdo.last_dictionary = None
        self._pdo.load()
        self._started = 1
        self.parent.event_subscribe(self,AlarmTriggerEvent)
        CompositeNode.start(self)
    def stop(self):
        self._started = 0
        self.parent.cancel(self,AlarmTriggerEvent)
        CompositeNode.stop(self)
    def _alarm_triggered(self, alarm):
        self._last_alarm = alarm
        self._pdo.last_dictionary = alarm.dictionary()
        self._pdo.save()
    def get(self, skipCache=0):
        return self._last_alarm
    def get_dictionary(self):
        return self._pdo.last_dictionary
Example #3
class SimplePersistentValue(SimpleValue):
    def configure(self, config):
        SimpleValue.configure(self, config)
        self._pdo = PersistentDataObject(self)
        self._pdo.value = None
        self._pdo.conversion = None
        self._pdo.load()
        conversion = _get_name(self.conversion)
        if (self._pdo.value == None or self._pdo.conversion != conversion):
            self._pdo.value = self.value
            self._pdo.conversion = conversion
            self._pdo.save()
        else:
            self.value = self._pdo.value

    def configuration(self):
        self.value = self._pdo.value
        return SimpleValue.configuration(self)

    def set(self, value, asyncOK=1):
        SimpleValue.set(self, value, asyncOK)
        self._pdo.value = self.value
        self._pdo.save()

    def get(self, skipCache=0):
        return self._pdo.value
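
The persistent-value examples above share one lifecycle: create the PDO, assign defaults, call load() to overlay any saved state, and save() after changes (Example #22 below adds the caveat that a PDO should only be created once its node is in the node tree). A minimal standalone sketch of that pattern; 'node' and 'compute_initial_value' are hypothetical stand-ins:

from mpx.lib.persistent import PersistentDataObject

pdo = PersistentDataObject(node)         # 'node': hypothetical parent node
pdo.value = None                         # defaults are assigned first...
pdo.load()                               # ...then overlaid by saved state, if any
if pdo.value is None:
    pdo.value = compute_initial_value()  # hypothetical helper
    pdo.save()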
Example #4
 def _certificate_maintenance(self):
     previous = PersistentDataObject(self)
     previous.cert_config = None
     previous.key_file = None
     previous.server_cert = None
     previous.cert_fingerprint = None
     previous.load()
     c = certificate.CertificateConfiguration(self)
     config = {'C':self.country}
     config['ST'] = self.state
     config['L'] = self.city
     config['O'] = self.organization
     config['OU'] = self.organizational_unit
     config['CN'] = self.common_name
     config['emailAddress'] = self.email
     c.configure(config)
     cert_fingerprint = makecert.get_fingerprint(self.server_cert)
     if previous.cert_fingerprint == cert_fingerprint:
        msglog.log('broadway', msglog.types.INFO, 'Certificate Fingerprint Match!!!!' )
     else:
        msglog.log('broadway', msglog.types.INFO, 'Certificate Fingerprint Mismatch!!!!' )
     if c == previous.cert_config and \
        previous.key_file == self.key_file and \
        previous.cert_fingerprint == cert_fingerprint and \
        not certificate.is_outdated(self.server_cert):
         msglog.log('broadway', msglog.types.INFO,
                    'Using existing certificate')
         return
     msglog.log('broadway', msglog.types.INFO, 'Generating new certificate')
     filename = os.path.join(properties.TEMP_DIR, 'cert_config.tmp')
     file = open(filename, 'w')
     c.formatted_output_to_file(file)
     try:
         failed = 1
         makecert.create_from_file(filename, self.key_file,
                                   self.server_cert)
         failed = 0
         msglog.log('broadway', msglog.types.INFO,
                    'Certificate generated')
     except:
         msglog.exception()
         msglog.log('broadway', msglog.types.WARN,
                    'Certificate generation failed')
     file.close()
     os.remove(filename)
     if not failed:
         previous.cert_config = c.configuration()
         previous.key_file = self.key_file
         previous.server_cert = self.server_cert
         previous.cert_fingerprint = makecert.get_fingerprint(self.server_cert)
         previous.save()
     return
Example #5
class LastAlarm(CompositeNode, EventConsumerMixin):
    def __init__(self):
        self._last_alarm = None
        self._started = 0
        CompositeNode.__init__(self)
        EventConsumerMixin.__init__(self, self._alarm_triggered)

    def configure(self, config):
        CompositeNode.configure(self, config)

    def configuration(self):
        config = CompositeNode.configuration(self)
        return config

    def start(self):
        self._pdo = PersistentDataObject(self)
        self._pdo.last_dictionary = None
        self._pdo.load()
        self._started = 1
        self.parent.event_subscribe(self, AlarmTriggerEvent)
        CompositeNode.start(self)

    def stop(self):
        self._started = 0
        self.parent.cancel(self, AlarmTriggerEvent)
        CompositeNode.stop(self)

    def _alarm_triggered(self, alarm):
        self._last_alarm = alarm
        self._pdo.last_dictionary = alarm.dictionary()
        self._pdo.save()

    def get(self, skipCache=0):
        return self._last_alarm

    def get_dictionary(self):
        return self._pdo.last_dictionary
Example #6
class SimplePersistentValue(SimpleValue):
    def configure(self, config):
        SimpleValue.configure(self, config)
        self._pdo = PersistentDataObject(self)
        self._pdo.value = None
        self._pdo.conversion = None
        self._pdo.load()
        conversion = _get_name(self.conversion)
        if (self._pdo.value == None or 
            self._pdo.conversion != conversion):
            self._pdo.value = self.value
            self._pdo.conversion = conversion
            self._pdo.save()
        else:
            self.value = self._pdo.value
    def configuration(self):
        self.value = self._pdo.value
        return SimpleValue.configuration(self)
    def set(self,value,asyncOK=1):
        SimpleValue.set(self, value, asyncOK)
        self._pdo.value = self.value
        self._pdo.save()
    def get(self, skipCache=0):
        return self._pdo.value
Example #7
 def __save(self):
     result = PersistentDataObject.save(self)
     self.__snapshot(self.saved())
     return result
Example #8
class CloudManager(CompositeNode):
    implements(ICloudManager)
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self, *args):
        super(CloudManager, self).__init__(*args)
        self.dispatcher = Dispatcher('Cloud Manager:Dispatcher')
        register_utility(self, ICloudManager, 'Cloud Manager')
        self.peer = Event.LOCALORIGIN
        self.formation = []
        self._scheduled = None
        self.unreachable = {}
        self.subscription = None
        if((as_node('/services/network/https_server')).is_enabled()):
            self.secure_http = True
        else:
            self.secure_http = False
        self.channel_monitor = ChannelMonitor()
        self.channel_monitor.trigger = CallbackTrigger(self.channel_monitor)
    def stop(self):
        if self.subscription:
            self.remove_listener(self.subscription)
        if self.channel_monitor.is_running():
            self.channel_monitor.stop_monitor()
        self.subscription = None
        super(CloudManager, self).stop()

    def is_event_valid(self,cloudevent):
        portal = self.nformation.get_portal()
        topic=cloudevent.topics[0]
        if(topic == 'EventResend' ):
            if( (portal != None ) and utils.same_host(cloudevent.origin,portal) ):
                return(True)
        elif(topic == 'Alarm Manager' ):
            #if (self.is_peer_in_formation(cloudevent.origin) == True):
            return(True)
        elif(topic == 'CloudFormation' ):
            return(True)

        return(False)


    def handle_remote_event(self, data):
        cloudevent = IPickles(cPickle.loads(data))()
        self.message('Handling remote event from : %s topic=%s ' %(cloudevent.origin,cloudevent.topics))
        cloudevent.set_data(data)
        if(self.is_event_valid(cloudevent) == False ):
            self.message('Dropping the remote event from : %s topic=%s ' 
                           %(cloudevent.origin,cloudevent.topics),msglog.types.WARN)
            return
            
        self.dispatcher.dispatch(cloudevent, cloudevent.topics)
        if(not ('CloudFormation' in cloudevent.topics) ):
            return
        '''
        Don't propagate an event if we are the Portal.
        '''
        if((cloudevent.portal != None )and (utils.same_host(self.peer,cloudevent.portal)) ):
            self.message('Not Propagating remote event, since I am getting it as a portal:')
            return
        
        
        self.propogate(cloudevent)

    def send_event_to_portal(self,event,topic,target):
        cloudevent = CloudEvent(
            self, self.peer, [target], self.nformation.get_portal(),topic, event)

        protocol = "https" if self.secure_http else "http"
        notifier = CloudNotifier(self.channel_monitor,target, protocol,'/cloud', self.debug)
        notifier.cloudevent = cloudevent
        if not cloudevent.has_data():
            cloudevent.set_data(cPickle.dumps(IPickles(cloudevent)))
        clouddata = cloudevent.get_data()
        notifier.notify(clouddata, self.handle_send_failure_portal,self.handle_send_success_portal)

    def handle_send_failure_portal(self, notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        self.message('Unable to send alarm events to portal=%s ' % (target_peer),msglog.types.WARN)

    def handle_send_success_portal(self,notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        self.message('Successfully sent alarm events to portal=%s ' % (target_peer))



    def handle_local_event(self, event, topics = []):
        cloudevent = CloudEvent(
            self, self.peer, self.target_formation, self.nformation.get_portal(),topics, event)
        self.propogate(cloudevent)
    
    def is_peer_in_formation(self,peer,formation=None):
        if not formation:
            formation = self.get_formation()
        peer_ip=utils.get_ip_addr(peer)
        return any(utils.same_host(peer_ip, ip) for ip in formation)
    
    def is_host_the_portal(self,host):
        portal = self.nformation.get_portal()
        if not portal:
            return False
        return utils.same_host(portal, host)
    
    def is_host_in_formation(self, host):
        if self.is_host_the_portal(host):
            return True
        if self.is_peer_in_formation(host):
            return True
        return False

    def handle_formation_update(self, cloudevent):
        '''
        Don't take any action (such as updating the Cloud formation or
        Portal) if this event was received as a portal. The alarms are
        shown in the event manager by a different mechanism.
        '''
        if((cloudevent.portal != None ) and (utils.same_host(self.peer,cloudevent.portal))):
            self.message('Received the event as a Portal, so not going to take any action %s' % str(cloudevent))
            self.message('handle_formation_update doing nothing, no change.')
            return
        
        formation = cloudevent()
        if (self.is_peer_in_formation(self.peer,formation) == False):
            formation = [self.peer]
            self.message('Setting Cloud Formation to self.peer; no longer in Cloud.',msglog.types.INFO)
        
        self._setup_formation(formation,cloudevent.portal)

    
    def _setup_formation(self, formation,portal):
        scheduled, self._scheduled = self._scheduled, None
        if scheduled is not None:
            try: scheduled.cancel()
            except: pass
            else: self.message('Canceled pending dispatch of formation update.')
        self.nformation.set_portal(portal)
        self.nformation.set_formation(formation)
        self.target_formation = self.nformation.compute_targets()
        self.message('Resetting unreachables during Cloud setup.')
        self.reset_unreachables()
        (dispatch,delay)=self.nformation.compute_dispatch_info()
        if (dispatch):
            self._scheduled = scheduler.after(delay, self.dispatcher.dispatch, (FormationUpdated(self),))
            self.message('Scheduled dispatch in %s seconds.' % delay)
        else: self.message('Formation of one peer, no Updated event generated.')
        
        # Save the PDO, if the formation or portal has changed
        if((self._pdo.formation != formation) or (self._pdo.portal != portal) or (self._pdo.peer != self.peer)):
            self.message('New formation/portal found , hence pickling. New Formation is :%s portal is %s' %(str(formation),portal))
            self._pdo.formation=formation[:]
            self._pdo.portal=portal
            self._pdo.peer=self.peer
            tstart = time.time()
            self._pdo.save()
            tend = time.time()
            self.message('New formation pickled and saved in %s seconds.' % (tend - tstart))
        else:
            self.message('Formation/Portal has not changed. Not pickling it. ' )

    
    def update_formation(self, new_formation,portal):
        (no_of_excluded_peers,excludes)=self.nformation.compute_excludes(new_formation)
        if no_of_excluded_peers:
            self.message( 'Notifying removed participants: %s' % (excludes,))
            excludedevent = CloudEvent(self, self.peer, excludes,self.nformation.get_portal(),['CloudFormation'], new_formation)
            self.propogate(excludedevent)
        else: 
            self.message( 'All current Cloud members are in the new Cloud Formation.')
        self._setup_formation(new_formation,portal)
        self.handle_local_event(new_formation, ['CloudFormation'])

    def handle_propogation_failure(self, notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        # TODO: generate comm failure error to propagate as well.
        # Propagate event to the Cloud Managers that target_peer would
        # have notified.
        '''
        The target_peer can be the portal or a peer. If it is the portal,
        we do not put it in unreachables and we do not propagate the
        event; we only log that the portal is unreachable.
        '''
        portal=self.nformation.get_portal()
        if((portal != None ) and (utils.same_host(target_peer,portal))):
            msg='Portal %s is not reachable.' % portal
            self.message(msg)
            return
            
        
        scheduled = self.unreachable.get(target_peer)
        if scheduled is not None:
            scheduled.cancel()
            self.message('Host %s already listed unreachable, reset scheduled retry.' % target_peer)
        self.unreachable[target_peer] = scheduler.after(5 * 60, self._remove_from_unreachable, (target_peer,))
        self.message('Host %s added to list of unreachable peers.' % target_peer)
        self.propogate(cloudevent, target_peer)

    def _remove_from_unreachable(self, peer):
        if self.unreachable.has_key(peer):
            del(self.unreachable[peer])
            self.message('Removed "%s" from unreachable to retry.' % peer)
        else: self.message('Host "%s" not in unreachable, ignoring remove.' % peer)

    def reset_unreachables(self):
        message = 'Resetting unreachables:\n'
        unreachables = self.unreachable.items()
        self.unreachable.clear()
        for peer, entry in unreachables:
            entry.cancel()
            message += '\t- removed "%s" from unreachables;\n' % peer
        message += '\t%s peers cleared from unreachables.' % len(unreachables)
        self.message(message)
        return len(unreachables)

    def add_listener(self, callback, topic):
        return self.dispatcher.register_for_topic(callback, topic)

    def remove_listener(self, guid):
        return self.dispatcher.unregister(guid)

    def propogate(self, cloudevent, from_peer = None):
        '''
        No Changes - Just make sure ...
        '''
        if not isinstance(cloudevent, CloudEvent):
            raise TypeError('Argument must be instance of CloudEvent')
        if from_peer is None: from_peer = self.peer
        self.message('Propogating as %s:\n\t%s...' % (from_peer, str(cloudevent)))
        target_formation = cloudevent.targets

        notifiers = []
        targets = self.nformation.get_targets(target_formation, from_peer)
        for target in targets:
            if not target.strip():
                self.message('Not notifying "%s" because not valid, adding its targets.' % target)
                targets.extend(self.nformation.get_targets(target_formation, target))
            elif not self.unreachable.has_key(target):
                protocol = "https" if self.secure_http else "http"
                notifier = CloudNotifier(self.channel_monitor, target, 
                                         protocol, '/cloud', self.debug)
                notifier.cloudevent = cloudevent
                notifiers.append(notifier)
            else:
                self.message('Host "%s" unreachable, adding its targets.' % target)
                targets.extend(self.nformation.get_targets(target_formation, target))
        if not cloudevent.has_data():
            cloudevent.set_data(cPickle.dumps(IPickles(cloudevent)))
        clouddata = cloudevent.get_data()
        notified = []
        for notifier in notifiers:
            notifier.notify(clouddata, self.handle_propogation_failure)
            notified.append(notifier.peer)
        if(len(notified) > 0 ):
            self.message('Propogate notified: %s' % (notified,))
        return notified

    

    def message(self, message, mtype = msglog.types.DB):
        if mtype != msglog.types.DB or self.debug:
            message = 'CloudManager(%s) - %s' % (self.peer, message)
            msglog.log('broadway', mtype, message)

    security.protect('add_peer', 'Configure')
    def add_peer(self,peer):
        formation=self.nformation.get_formation()
        formation.append(peer)
        portal=self.nformation.get_portal()
        self.update_formation(formation,portal)
        return

    def get_formation(self):
        formation=self.nformation.get_formation()
        return(formation)
    
    def get_portal(self):
        portal=self.nformation.get_portal()
        return(portal)

    security.protect('set_portal', 'Configure')
    def set_portal(self,portal):
        formation=self.nformation.get_formation()
        self.update_formation(formation,portal)

    security.protect('remove_peer', 'Configure')
    def remove_peer(self,peer):
        formation = self.nformation.get_formation()
        formation.remove(peer)
        portal=self.nformation.get_portal()
        self.update_formation(formation,portal)
    
    def start(self):
        # Bad self IP Address 
        if(self.peer == '127.0.0.1' ):
            msg='Cloud facility will not function properly because of local IP address being 127.0.0.1'
            self.message(msg,msglog.types.WARN)
            return 
        if not self.channel_monitor.is_running():
            self.channel_monitor.start_monitor()
        self._pdo=PersistentDataObject(self)
        self.message('The Cloud Manager Persistent Object is in the file :%s' %str(self._pdo.filename()),msglog.types.INFO)
        migration=False
        if(os.path.exists(self._pdo.filename())):
            # Already Migrated
            self._pdo.formation=[self.peer]
            self._pdo.portal=None
            self._pdo.peer=self.peer
            self._pdo.load()
        else:
            # We save a 'default' formation and expect the Cloud Configurator to 
            # update the _pdo.formation via update_information API.
            # The _setup_formation gets called internally from update_information
            self._pdo.portal=None
            self._pdo.formation=[self.peer]
            self._pdo.peer=self.peer
            self._pdo.save()
            self._pdo.load()
            migration=True
        
        #Bad formation/peer in the PDO
        if( not self._pdo.peer in self._pdo.formation ):
            #Bad formation/peer
            self.message('The Cloud Manager PDO in the file :%s is corrupted. Defaulting to safe configuration' %str(self._pdo.filename()),msglog.types.WARN)
            self._pdo.portal=None
            self._pdo.formation=[self.peer]
            self._pdo.peer=self.peer
            self._pdo.save()
            self._pdo.load()
        
        self.message('Hosts are :%s portal=%s self=%s' %(str(self._pdo.formation),self._pdo.portal,self._pdo.peer),msglog.types.INFO)
        self.nformation=NFormation(self._pdo.formation,self.peer)
        self.nformation.set_portal(self._pdo.portal)
        
        # IP Address Change Case
        if(not utils.same_host(self.peer,self._pdo.peer)):
            self.message('Self address change detected old=%s new=%s. Fixing the Cloud Formation accordingly' %(str(self._pdo.peer),self.peer),msglog.types.INFO)
            formation = self.nformation.get_formation()
            norm_form=self.nformation.normalize_formation(formation)
            # IP Address Swap
            self_index=norm_form.index(self._pdo.peer)
            formation.pop(self_index)
            formation.insert(0,self.peer)
            self.nformation.set_formation(formation)
               
        '''
        In the Case of Migration, the update_formation() API is called
        by the Cloud Configurator. In the already migrated case, we call the
        update_formation() with the PDO formation and Portal
        '''
        
        self.target_formation = self.nformation.compute_targets()
        
        if(migration == False):
            self.update_formation(self.nformation.get_formation(), self.nformation.get_portal())
        
        if self.subscription is None:
            self.subscription = self.add_listener(
                self.handle_formation_update, 'CloudFormation')

        # Send Cloud Event to all the Hosts for re-sending the alarm events 
        # over to the Portal again - if we are nbmm
        if(self.is_host_nbmm()):
            scheduler.after(10, self.request_for_resending_alarm_events)

        super(CloudManager, self).start()

    def is_host_nbmm(self):
        devices=as_node('/interfaces').children_names()
        if('relay1' in devices ):
            return(False)
        else:
            return(True)

    def get_hosts_list(self):
        hosts_list=[]
        all_hosts=as_node('/services/Host Manager').children_nodes()
        for h in all_hosts:
            hosts_list.append(h.host)
        return(hosts_list)

    def request_for_resending_alarm_events(self):
        hosts_list=self.get_hosts_list()
        for host in hosts_list:
            cloudevent = CloudEvent(self, self.peer, [host],None,['EventResend'],[host])
            cloudevent.set_data(cPickle.dumps(IPickles(cloudevent)))
            self.send_req_for_alarm_events(host,cloudevent)

    def handle_send_failure(self, notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        self.message('Unable to notify %s to send alarm events again ' % (target_peer),msglog.types.WARN)

    def handle_send_success(self,notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        self.message('Successfully notified %s to send alarm events again ' % (target_peer))

    def send_req_for_alarm_events(self,target,cloudevent):
        from_peer=self.peer
        protocol = "https" if self.secure_http else "http"
        notifier = CloudNotifier(self.channel_monitor,target, protocol,'/cloud', self.debug)
        notifier.cloudevent = cloudevent
        clouddata = cloudevent.get_data()
        notifier.notify(clouddata, self.handle_send_failure,self.handle_send_success)
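
In handle_propogation_failure above, an unreachable peer is quarantined for five minutes via scheduler.after while events are routed around it through its targets. A standalone sketch of that timed-quarantine pattern, assuming scheduler.after(delay, func, args) behaves as in the code above:

unreachable = {}

def mark_unreachable(peer):
    entry = unreachable.get(peer)
    if entry is not None:
        entry.cancel()              # a removal is already pending; reset it
    unreachable[peer] = scheduler.after(
        5 * 60, unreachable.pop, (peer, None))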
Example #9
class EnergywiseManager(CompositeNode):
    def __init__(self):
        CompositeNode.__init__(self)
        self._pdo_lock = Lock()
        self._pdo = None
        self.__running = False
        self.debug = 0
        return

    def configure(self,config):
        if self.debug:
            msglog.log('EnergywiseManager:', msglog.types.INFO,
                       'Inside configure' )
        CompositeNode.configure(self, config)
        set_attribute(self, 'debug', 0, config, int)
        return

    def configuration(self):
        config = CompositeNode.configuration(self)
        get_attribute(self, 'debug', config, str)
        return config

  

    # def configure_trend_in_switches(self, start_node, frequency):
    #     for child in start_node.children_nodes():
    #         if child.children_nodes():
    #             self.configure_trend_in_switches(child, frequency)
    #         else:
    #             # reached a leaf; each energywise switch has trends as a child
    #             child.new_trend(frequency)
    #     return

    def delete_trend_configuration(self, trend_domain):
        self._pdo_lock.acquire()
        try:
            if self._pdo.trends.has_key(trend_domain): 
                # stop logging as well
                del self._pdo.trends[trend_domain]
            self._pdo.save()
        finally:
            self._pdo_lock.release()
        return
    def delete_trends(self, trendList):
        if self.debug:
            msglog.log('EnergywiseManager:', msglog.types.INFO, 
                       'Inside delete_trends' )

        for domain in trendList.split(':'):
            if domain:
                domain_node = as_node(domain)
                domain_node.delete_trend()
                self.delete_trend_configuration(domain)
        return
         
    def start(self):
        if self.__running:
            return
        if self.debug:
            msglog.log('EnergywiseManager :', msglog.types.INFO, 'Inside start' )
        CompositeNode.start(self)
#        start_node = as_node('/services/EnergywiseManager/')
#        self.configure_trend_in_switches(start_node, 60)
        self.__running = True
        self._pdo_lock.acquire()
        self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
        self._pdo.trends = {}
        self._pdo.load()
        self._pdo_lock.release()
        if self.has_child('trends'):
            self.trends = self.get_child('trends')
        else:
            self.trends = CompositeNode()
            self.trends.configure({'parent':self, 'name':'trends'})
            self.trends.start()
        # start trending for saved domains
        for domain,freq in self._pdo.trends.items():
            try:
                start_node = as_node(domain)
               # self.configure_trend_in_switches( start_node,freq )
                start_node.new_trend(freq)
            except:
                self.delete_trend_configuration(domain)
        return

    def get_trends(self):
        return self._pdo.trends.items()

    def add_trend_configuration(self, trend_period, trend_domain):
        self._pdo_lock.acquire()
        try:
            self._pdo.trends[trend_domain] = trend_period
            self._pdo.save()
        finally:
            self._pdo_lock.release()
        return
    def save_trends(self, trend_list):
        # Traverse _pdo.trends and check whether each new domain is a
        # subset or a superset of one already configured.
        # If it is a subset, return with a message that it is already
        # covered and do not save it.
        # If it is a superset, configure the new one and delete the
        # subset from _pdo.trends.
        '''Adding and saving trends'''
        for point in reversed(trend_list):
            point_period = point['frequency']
            point_domain = point['domain']
            for  saved_domain,saved_period in tuple(self._pdo.trends.items()):
                if saved_domain == point_domain:
                    if saved_period != point_period:
                        self.delete_trend_configuration(saved_domain)
                        break
            if not self._pdo.trends.has_key(point_domain):
                # add this trend
                try:
                    domain_node = as_node(point_domain)
                    if isinstance(domain_node, EnergywiseSwitch) or \
                       isinstance(domain_node, EnergywiseDomain):
                        self.add_trend_configuration(point_period, point_domain)
                        domain_node.new_trend(point_period)
                except Exception:
                    msglog.exception()
                    msglog.log(
                        "Energywise",msglog.types.ERR,
                        "Failed to create trend for %r every %r seconds" 
                        %(point_domain,point_period)
                        )
        return


    def stop(self):
        CompositeNode.stop(self)
        self.__running = False
        return
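
Both delete_trend_configuration and add_trend_configuration above serialize PDO mutations behind _pdo_lock, with try/finally so an exception in save() cannot leave the lock held. A standalone sketch of that guard, substituting threading.Lock for the mpx Lock (an assumption):

import threading

lock = threading.Lock()
trends = {}

def set_trend(domain, period):
    lock.acquire()
    try:
        trends[domain] = period
        # persist here; the lock is released even if persisting raises
    finally:
        lock.release()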
Example #10
 def save(self):
     PersistentDataObject.save(self)
     msglog.log('interfaces....BBMD', msglog.types.INFO,
                'Saved BBMD table to Persistent Storage')
Example #11
import inspect
import string
import time

from mpx.lib.persistence import storage
from mpx.lib.persistence import datatypes
from mpx.lib.persistent import PersistentDataObject as PDO

data = dict([(str(key), string.ascii_letters) for key in range(10000)])
pdo = PDO('many-key-test')
pdict = datatypes.PersistentDictionary('many-key-test')
pdo.load()
pdodict = pdo.__dict__

items = data.items()
pdostart = time.time()
for key, value in items:
    pdodict[key] = value
    pdo.save()

pdostop = time.time()

pdictstart = time.time()
for key, value in items:
    pdict[key] = value

pdictstop = time.time()

print 'Took %0.4f seconds to set/save %d PDO attributes' % (pdostop - pdostart,
                                                            len(items))
print 'Took %0.4f seconds to set/save %d PDict items' % (
    pdictstop - pdictstart, len(items))

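Each pass of the PDO loop above calls pdo.save(), which rewrites the object's persisted state as a whole; that per-save cost is what the printed timings contrast with PersistentDictionary's per-item writes. If a single snapshot at the end is acceptable, batching the save amortizes that cost (sketch):

for key, value in items:
    pdodict[key] = value
pdo.save()    # one save instead of len(items) saves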
Example #12
 def save(self):
     PersistentDataObject.save(self)
     msglog.log("interfaces....BBMD", msglog.types.INFO, "Saved BBMD table to Persistent Storage")
class SynchronizedExporter(Exporter,EventConsumerMixin):
    def __init__(self):
        self.running = 0
        self._lock = Lock()
        Exporter.__init__(self)
        EventConsumerMixin.__init__(self,self.handle_log,self.handle_error)
    def debug_information(self,message):
        if self.debug:
            debug = '%s Exporter => %s' % (self.name,message)
            msglog.log('broadway',msglog.types.DB,debug)
    def handle_log(self,event):
        self._event_count += 1
        self.debug_information('Log entry event caught.')
        if self._event_count >= self.log_multiple:
            self.debug_information('Going to start export thread.')
            if self._lock.acquire(0):
                try:
                    thread = Thread(name=self.name, target=self.go,
                                    args=(event.values[0],))
                    thread.start()
                    self._event_count = 0
                finally:
                    self._lock.release()
            else:
                msglog.log('broadway',msglog.types.WARN, 
                           ('Last export still active, ' + 
                            'skipping current request.'))
    def handle_error(self,exc):
        msglog.exception(exc)
    def configure(self, config):
        set_attribute(self,'log_multiple',1,config,int)
        set_attribute(self,'timeout',60,config,int)
        set_attribute(self,'connection_node','/services/network',config)
        set_attribute(self,'connection_attempts',3,config,int)
        Exporter.configure(self, config)
    def configuration(self):
        config = Exporter.configuration(self)
        get_attribute(self,'log_multiple',config,str)
        get_attribute(self,'connection_node',config)
        get_attribute(self,'connection_attempts',config)
        get_attribute(self,'timeout',config,int)
        return config
    def start(self):
        Exporter.start(self)
        if not self.running:
            self.running = 1
            self.connection = as_node(self.connection_node)
            self._event_count = self.log_multiple - 1
            self._time_keeper = PersistentDataObject(self)
            self._time_keeper.start_time = 0
            self._time_keeper.load()
            self._period = self.parent.parent.period
            self.parent.parent.event_subscribe(self, LogAddEntryEvent)
        else: 
            raise EAlreadyRunning
    def stop(self):
        self.running = 0
    def scheduled_time(self):
        return self._end_time
    def go(self, end_time):
        self.debug_information('Exporting.')
        self._lock.acquire()
        try:
            self._end_time = end_time
            self._export(end_time)
            self._end_time = None
            self.debug_information('Done Exporting.')
        except:
            msglog.exception()
        self._lock.release()
    def _export(self,end_time):
        attempts = 0
        connected = 0
        while attempts < self.connection_attempts:
            self.debug_information('Acquiring connection...')
            try:
                connected = self.connection.acquire()
            except:
                msglog.exception()
            if connected:
                self.debug_information('Connection acquired.')
                break
            self.debug_information('Failed to acquire.')
            attempts += 1
        else:
            self.debug_information('Connection failed, aborting.')
            raise EConnectionError('Failed to connect %s times' % attempts)
        try:
            last_break = 0
            end = end_time
            start_time = self._time_keeper.start_time
            while start_time <= end_time:
                self.debug_information('Getting data from %s to %s.' 
                                       % (start_time,end))
                data = self.log.get_range('timestamp',start_time,end)
                if not data:
                    self.debug_information('No Data to export.')
                    raise ENoData('timestamp',start_time,end)
                try:
                    self.debug_information('Calling format.')
                    output = self.formatter.format(data)
                    self.debug_information('Calling transport.')
                    self.transporter.transport(output)
                    self.debug_information('Done transporting.')
                    start_time = end + self._period
                except EBreakupTransfer, e:
                    entry = e.break_at
                    self.debug_information('Breaking up transfer.')
                    if entry['timestamp'] == last_break:
                        # prevents loop where transporter is just failing.
                        raise EIOError('EBreakupTransfer not progressing.')
                    last_break = entry['timestamp']
                    end = last_break - self._period
                    msglog.log('broadway',msglog.types.WARN,
                               'Breaking up data transfer at %s.' % end)
                else:
                    end = end_time
                    self._time_keeper.start_time = start_time
                    self._time_keeper.save()
        finally:
            if connected:
                self.connection.release()
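
The connection loop in _export relies on Python's while/else: the else branch runs only when the loop condition goes false without a break, i.e. when every attempt failed. A minimal standalone illustration; try_connect is a hypothetical stand-in:

attempts = 0
while attempts < 3:
    if try_connect():    # hypothetical; success breaks out, skipping else
        break
    attempts += 1
else:
    raise EConnectionError('Failed to connect %s times' % attempts)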
Example #14
 def set_meta_value(self,name,value):
     self.meta[name] = value
     PersistentDataObject.save(self)
class SynchronizedExporter(Exporter, EventConsumerMixin):
    def __init__(self):
        self.running = 0
        self._lock = Lock()
        Exporter.__init__(self)
        EventConsumerMixin.__init__(self, self.handle_log, self.handle_error)

    def debug_information(self, message):
        if self.debug:
            debug = '%s Exporter => %s' % (self.name, message)
            msglog.log('broadway', msglog.types.DB, debug)

    def handle_log(self, event):
        self._event_count += 1
        self.debug_information('Log entry event caught.')
        if self._event_count >= self.log_multiple:
            self.debug_information('Going to start export thread.')
            if self._lock.acquire(0):
                try:
                    thread = Thread(name=self.name,
                                    target=self.go,
                                    args=(event.values[0], ))
                    thread.start()
                    self._event_count = 0
                finally:
                    self._lock.release()
            else:
                msglog.log('broadway', msglog.types.WARN,
                           ('Last export still active, ' +
                            'skipping current request.'))

    def handle_error(self, exc):
        msglog.exception(exc)

    def configure(self, config):
        set_attribute(self, 'log_multiple', 1, config, int)
        set_attribute(self, 'timeout', 60, config, int)
        set_attribute(self, 'connection_node', '/services/network', config)
        set_attribute(self, 'connection_attempts', 3, config, int)
        Exporter.configure(self, config)

    def configuration(self):
        config = Exporter.configuration(self)
        get_attribute(self, 'log_multiple', config, str)
        get_attribute(self, 'connection_node', config)
        get_attribute(self, 'connection_attempts', config)
        get_attribute(self, 'timeout', config, int)
        return config

    def start(self):
        Exporter.start(self)
        if not self.running:
            self.running = 1
            self.connection = as_node(self.connection_node)
            self._event_count = self.log_multiple - 1
            self._time_keeper = PersistentDataObject(self)
            self._time_keeper.start_time = 0
            self._time_keeper.load()
            self._period = self.parent.parent.period
            self.parent.parent.event_subscribe(self, LogAddEntryEvent)
        else:
            raise EAlreadyRunning

    def stop(self):
        self.running = 0

    def scheduled_time(self):
        return self._end_time

    def go(self, end_time):
        self.debug_information('Exporting.')
        self._lock.acquire()
        try:
            self._end_time = end_time
            self._export(end_time)
            self._end_time = None
            self.debug_information('Done Exporting.')
        except:
            msglog.exception()
        self._lock.release()

    def _export(self, end_time):
        attempts = 0
        connected = 0
        while attempts < self.connection_attempts:
            self.debug_information('Acquiring connection...')
            try:
                connected = self.connection.acquire()
            except:
                msglog.exception()
            if connected:
                self.debug_information('Connection acquired.')
                break
            self.debug_information('Failed to acquire.')
            attempts += 1
        else:
            self.debug_information('Connection failed, aborting.')
            raise EConnectionError('Failed to connect %s times' % attempts)
        try:
            last_break = 0
            end = end_time
            start_time = self._time_keeper.start_time
            while start_time <= end_time:
                self.debug_information('Getting data from %s to %s.' %
                                       (start_time, end))
                data = self.log.get_range('timestamp', start_time, end)
                if not data:
                    self.debug_information('No Data to export.')
                    raise ENoData('timestamp', start_time, end)
                try:
                    self.debug_information('Calling format.')
                    output = self.formatter.format(data)
                    self.debug_information('Calling transport.')
                    self.transporter.transport(output)
                    self.debug_information('Done transporting.')
                    start_time = end + self._period
                except EBreakupTransfer, e:
                    entry = e.break_at
                    self.debug_information('Breaking up transfer.')
                    if entry['timestamp'] == last_break:
                        # prevents loop where transporter is just failing.
                        raise EIOError('EBreakupTransfer not progressing.')
                    last_break = entry['timestamp']
                    end = last_break - self._period
                    msglog.log('broadway', msglog.types.WARN,
                               'Breaking up data transfer at %s.' % end)
                else:
                    end = end_time
                    self._time_keeper.start_time = start_time
                    self._time_keeper.save()
        finally:
            if connected:
                self.connection.release()
Example #16
class FTPTransporter(Transporter):
    _last = None

    ##
    # Configure object.
    #
    # @key url  The url that data is to be sent
    #           to.  For example: ftp.hostname.com/tmp/.
    # @key username  The username for the FTP session.
    # @key password  The password for the FTP session.
    # @key file_prefix  The prefix for files that are
    #                   created when data is uploaded.
    # @key file_suffix The suffix to be appended to all
    #                  files created when data is uploaded.
    # @default .dat
    # @key name_scheme  Naming scheme to be used for each
    #                   created file.
    # @value timestamp  Insert timestamp between file_prefix
    #                   and file_suffix for each upload.
    # @value incremental  Insert incremental count of uploads
    #                     between prefix and suffix.
    # @value none  Do not use a naming scheme.  Each upload
    #              will overwrite the previous upload.
    # @default timestamp.
    #
    def configure(self, config):
        set_attribute(self, 'host', REQUIRED, config)
        set_attribute(self, 'port', 21, config, int)
        set_attribute(self, 'directory', '', config)
        set_attribute(self, 'username', REQUIRED, config)
        set_attribute(self, 'password', REQUIRED, config)
        #CSCtn64870
        if (config.has_key('timeout') and config['timeout'] == ''):
            config['timeout'] = 'None'
        set_attribute(self, 'timeout', None, config, float)
        set_attribute(self, 'file_prefix', 'cisco', config)
        set_attribute(self, 'file_suffix', '.dat', config)
        set_attribute(self, 'name_scheme', 'timestamp', config)
        set_attribute(self, 'timestamp_format', '%s', config)
        set_attribute(self, 'passive_mode', 1, config, as_boolean)
        set_attribute(self, 'file_append', 0, config, as_boolean)
        Transporter.configure(self, config)
        if self._last is None:
            self._last = PersistentDataObject(self)
            self._last.filename = None
            self._last.count = 1
            self._last.load()

    def configuration(self):
        config = Transporter.configuration(self)
        get_attribute(self, 'host', config)
        get_attribute(self, 'port', config, str)
        get_attribute(self, 'directory', config)
        get_attribute(self, 'username', config)
        get_attribute(self, 'password', config)
        get_attribute(self, 'timeout', config, str)
        get_attribute(self, 'file_prefix', config)
        get_attribute(self, 'file_suffix', config)
        get_attribute(self, 'name_scheme', config)
        get_attribute(self, 'timestamp_format', config)
        get_attribute(self, 'passive_mode', config, as_onoff)
        get_attribute(self, 'file_append', config, str)
        return config

    def transport(self, data):
        filename = self._generate_filename()
        if type(data) == type(''):
            data = StringIO(data)
        ftp = ftplib.FTP()
        ftp.connect(self.host, self.port, self.timeout)
        finished = 0
        try:
            ftp.login(self.username, self.password)
            ftp.set_pasv(self.passive_mode != 0)
            if self.file_append and not self.name_scheme:
                ftp.storlines('APPE ' + self._full_file_name(filename), data)
            else:
                ftp.storlines('STOR ' + self._full_file_name(filename), data)
            self._last.save()
            finished = 1
            data.close()
        finally:
            if not finished:
                # quit hangs if an exception occurred.
                ftp.close()
            else:
                try:
                    ftp.quit()
                except:
                    ftp.close()

    def _generate_filename(self):
        append = ''
        filename = self.file_prefix
        if self.name_scheme == 'incremental':
            append = '%s' % self._last.count
        elif self.name_scheme == 'timestamp':
            filetime = self.parent.time_function(self.parent.scheduled_time())
            filename += time.strftime(self.timestamp_format, filetime)
            append = '_%s' % (self._last.count + 1)
            if filename != self._last.filename:
                self._last.count = 0
                append = ''
        self._last.count += 1
        self._last.filename = filename
        return filename + append + self.file_suffix

    def _full_file_name(self, filename):
        if self.directory:
            if self.directory[-1:] == '/':
                filename = self.directory + filename
            else:
                filename = self.directory + '/' + filename
        return filename
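
With the defaults configured above (file_prefix 'cisco', file_suffix '.dat', timestamp_format '%s'), _generate_filename() produces names along these lines; the exact timestamp depends on the parent exporter's schedule, so the values shown are illustrative only:

# name_scheme == 'incremental': cisco1.dat, cisco2.dat, ...
# name_scheme == 'timestamp':   cisco1371153600.dat
#   ('_<count>' appears only when the same timestamped name repeats
#    before the timestamp changes)
# empty name_scheme:            cisco.dat, overwritten (or appended to,
#                               with file_append) on each upload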
Example #17
 def set_meta_value(self, name, value):
     self.meta[name] = value
     PersistentDataObject.save(self)
Example #18
import inspect
import string
import time

from mpx.lib.persistence import storage
from mpx.lib.persistence import datatypes
from mpx.lib.persistent import PersistentDataObject as PDO

data = dict([(str(key), string.ascii_letters) for key in range(10000)])
pdo = PDO('many-key-test')
pdict = datatypes.PersistentDictionary('many-key-test')
pdo.load()
pdodict = pdo.__dict__

items = data.items()
pdostart = time.time()
for key,value in items:
    pdodict[key] = value
    pdo.save()


pdostop = time.time()

pdictstart = time.time()
for key,value in items:
    pdict[key] = value


pdictstop = time.time()

print 'Took %0.4f seconds to set/save %d PDO attributes' % (pdostop - pdostart, len(items))
print 'Took %0.4f seconds to set/save %d PDict items' % (pdictstop - pdictstart, len(items))

Example #19
class Logger(ServiceNode):

    ##
    # @author Craig Warren
    # @param config
    # @return None
    def configure(self, config):
        ServiceNode.configure(self, config)

    ##
    # @author Craig Warren
    #   starts the logger service
    # @return None
    def start(self):
        ServiceNode.start(self)
        # this will correctly add the msglog as a child
        #  to the logger.
        if 'msglog' not in self.children_names():
            columns = mpx.lib.msglog.get_columns()
            log = Log()
            log.configure({'name': 'msglog', 'parent': self})
            for c in columns:
                column = mpx.lib.factory('mpx.service.logger.column')
                config = c.configuration()
                config['parent'] = log
                column.configure(config)
        self._logs = PersistentDataObject(self)
        self._logs.names = []
        self._logs.load()
        for name in self._logs.names:
            if ((not mpx.lib.log.log_exists(name))
                    and (name not in self.children_names())):
                log = mpx.lib.log.log(name)
                log.destroy()
                del (log)
        self._logs.names = []
        for child in self.children_nodes():
            if not isinstance(child, Alias):
                # Don't manage other managers' logs...
                self._logs.names.append(child.name)
        self._logs.save()

    ##
    # @author Craig Warren
    #   stops the logger service
    # @return None
    def stop(self):
        return ServiceNode.stop(self)

    ##
    # @author Craig Warren
    # @param log_name
    #   the name of the log to return
    # @return Log
    #   returns the log if it can't find the log it
    #   returns None
    def get_log(self, log_name):
        for child in self.children_nodes():
            if child.name == log_name:
                return child
        return None
Example #20
class FTPTransporter(Transporter):
    _last = None
    ##
    # Configure object.
    #
    # @key url  The url that data is to be sent
    #           to.  For example: ftp.hostname.com/tmp/.
    # @key username  The username for the FTP session.
    # @key password  The password for the FTP session.
    # @key file_prefix  The prefix for files that are
    #                   created when data is uploaded.
    # @key file_suffix The suffix to be appended to all
    #                  files created when data is uploaded.
    # @default .dat
    # @key name_scheme  Naming scheme to be used for each
    #                   created file.
    # @value timestamp  Insert timestamp between file_prefix
    #                   and file_suffix for each upload.
    # @value incremental  Insert incremental count of uploads
    #                     between prefix and suffix.
    # @value none  Do not use a naming scheme.  Each upload
    #              will overwrite the previous upload.
    # @default timestamp.
    #
    def configure(self, config):
        set_attribute(self, 'host', REQUIRED, config)
        set_attribute(self, 'port', 21, config, int)
        set_attribute(self, 'directory', '', config)
        set_attribute(self, 'username', REQUIRED, config)
        set_attribute(self, 'password', REQUIRED, config)
        #CSCtn64870
        if (config.has_key('timeout') and config['timeout'] == ''):
            config['timeout'] = 'None'
        set_attribute(self, 'timeout', None, config, float)
        set_attribute(self, 'file_prefix', 'cisco', config)
        set_attribute(self, 'file_suffix', '.dat', config)
        set_attribute(self, 'name_scheme', 'timestamp', config)
        set_attribute(self, 'timestamp_format', '%s', config)
        set_attribute(self, 'passive_mode', 1, config, as_boolean)
        set_attribute(self, 'file_append', 0, config, as_boolean)
        Transporter.configure(self, config)
        if self._last is None:
            self._last = PersistentDataObject(self)
            self._last.filename = None
            self._last.count = 1
            self._last.load()
    def configuration(self):
        config = Transporter.configuration(self)
        get_attribute(self, 'host', config)
        get_attribute(self, 'port', config, str)
        get_attribute(self, 'directory', config)
        get_attribute(self, 'username', config)
        get_attribute(self, 'password', config)
        get_attribute(self, 'timeout', config, str)
        get_attribute(self, 'file_prefix', config)
        get_attribute(self, 'file_suffix', config)
        get_attribute(self, 'name_scheme', config)
        get_attribute(self, 'timestamp_format', config)
        get_attribute(self, 'passive_mode', config, as_onoff)
        get_attribute(self, 'file_append', config, str)
        return config
    def transport(self, data):
        filename = self._generate_filename()
        if type(data) == type(''):
            data = StringIO(data)
        ftp = ftplib.FTP()
        ftp.connect(self.host, self.port, self.timeout)
        finished = 0
        try:
            ftp.login(self.username, self.password)
            ftp.set_pasv(self.passive_mode != 0)
            if self.file_append and not self.name_scheme:
                ftp.storlines('APPE ' + self._full_file_name(filename), data)
            else:
                ftp.storlines('STOR ' + self._full_file_name(filename), data)
            self._last.save()
            finished = 1
            data.close()
        finally:
            if not finished:
                # quit hangs if an exception occurred.
                ftp.close()
            else:
                try:
                    ftp.quit()
                except:
                    ftp.close()
    def _generate_filename(self):
        append = ''
        filename = self.file_prefix
        if self.name_scheme == 'incremental':
            append = '%s' % self._last.count
        elif self.name_scheme == 'timestamp':
            filetime = self.parent.time_function(self.parent.scheduled_time())
            filename += time.strftime(self.timestamp_format, filetime)
            append = '_%s' % (self._last.count + 1)
            if filename != self._last.filename:
                self._last.count = 0
                append = ''
        self._last.count += 1
        self._last.filename = filename
        return filename + append + self.file_suffix
    def _full_file_name(self, filename):
        if self.directory:
            if self.directory[-1:] == '/':
                filename = self.directory + filename
            else:
                filename = self.directory + '/' + filename
        return filename
Example #21
 def __save(self):
     result = PersistentDataObject.save(self)
     self.__snapshot(self.saved())
     return result
Example #22
class XMLFormatter(Formatter):
    MIME_TYPE='text/xml'
    def __init__(self):
        Formatter.__init__(self)
        self._channels = {} # {name:{uom:,meastype:,Delta:,Totalized:,key:}}
        self._exception_log = None
        # NEVER CREATE A PDO BEFORE THE NODE IS INSERTED IN THE NODE TREE!
        self._PDO = None
        return
    ##
    # @param config
    # @key timestamp_format the timestamp format string, for example:
    # %Y-%m-%dT%H:%M:%S.
    # @key info the information that will be placed in the info attribute
    # of the data tag.
    def configure(self, config):
        Formatter.configure(self, config)
        set_attribute(self, 'debug_lvl', 0, config, int)
        set_attribute(self, 'timestamp_format', '%Y-%m-%dT%H:%M:%S', config)
        set_attribute(self, 'pretty_format',0,config,as_boolean)
        set_attribute(self, 'location_info','DefaultLocationInfo',config)
        set_attribute(self, 'location_key','DefaultLocationKey',config)
        set_attribute(self, 'panel_info','DefaultPanelInfo',config)
        set_attribute(self, 'panel_key','DefaultPanelKey',config)
        set_attribute(self, 'capture_period',24.0,config,float) # capture period preceding data transmission time (hrs)
        set_attribute(self, 'exception_log_url','/services/logger/fsg_exception_log',config)
    ##
    # @return the configuration
    def configuration(self):
        config = Formatter.configuration(self)
        get_attribute(self, 'debug_lvl', config, int)
        get_attribute(self, 'timestamp_format', config)
        get_attribute(self, 'pretty_format',config,str)
        get_attribute(self, 'location_info',config)
        get_attribute(self, 'location_key',config)
        get_attribute(self, 'panel_info',config)
        get_attribute(self, 'panel_key',config)
        get_attribute(self, 'capture_period',config,float) # capture period preceding data transmission time (hrs)
        get_attribute(self, 'exception_log_url',config)
        return config
    
    def start(self):
        self._PDO = PersistentDataObject(self,dmtype=GC_NEVER)
        self._PDO.exception_log_last_time = 0.0
        self._PDO.load()
        # Scan subtree of grandparent logger for channel (column) 'fsg_attrs'
        # nodes containing info required for FSG Demo, so that we don't have
        # to do the scan every time format() is called:
        self._channels = {}
        columns_node = self.parent.parent.parent.get_child('columns')
        column_nodes = columns_node.children_nodes()
        for column_node in column_nodes:
            if column_node.name == 'timestamp':
                continue
            assert isinstance(column_node, ChannelAttrsColumn) \
                   or isinstance(column_node, ChannelAttrsDeltaColumn), \
                   'Column %s should be class ChannelAttrsColumn, but is class %s' \
                   % (column_node.name, column_node.__class__.__name__)
            self._channels[column_node.name] = {
                'channel_node': column_node,
                'values': [],
                }
        self._exception_log = None
        try:
            self._exception_log = as_node(self.exception_log_url)
        except ENoSuchName:
            pass
        return
    ##
    # cancel():
    # Called by the exporter if an attempted transport fails, to clear out
    # pre-formatted data waiting in the self._channels value dicts. Otherwise,
    # the pre-formatted data in self._channels would still be present at the
    # next attempt, and would cause transport of multiple copies of the same
    # data.
    def cancel(self):
        for channel_dict in self._channels.values():
            channel_dict['values'] = []
        return
    ##
    # @param data list of dictionary values to be converted into XML format.
    # @param pretty_format 0 or 1; optional parameter to return pretty XML,
    # i.e. XML with line breaks in it.
    # @default 0
    # @note timestamp MUST be one of the dictionary keys.
    # @throws EIncompatibleFormat if timestamp is not a key in a dictionary entry.
    def format(self, data, pretty_format=None):
        # Organize all log data (list of time-based dicts) into a dict of
        # point-based lists. (A dict of lists could get REALLY large; we may
        # need to do only one point at a time.)
        # self._channels:K=col_name,V=col_dict
        # col_dict:K='column_node':,'values':list_of_2tuples
        # list_of_2tuples: [(timestamp,value),]
        # Only want records for preceding self.capture_period-hr period:
        end_time = time.time()
        start_time = self.parent.last_time() # ASSUME that parent is a periodic exporter...
        # Comment out line below, in favor of line above, because FSG tends to
        # disable their FTP server (effectively) for days at a time, but still
        # want all the data gathered during those blackout periods to go to the
        # FTP server when the server reappears with respect to the Mediator. 
        # This change means that the FTP server receives table-formatted data all
        # the way back to the last successful export, regardless of the
        # actual size of that data:
        #start_time = end_time - (self.capture_period * 3600.0)
        data_to_send = 0
        data = data[:]
        self.debug_print('Data: %s' % data,None,1)
        removed_channels = []
        for log_rec_dict in data:
            timestamp = log_rec_dict['timestamp']
            if (timestamp < start_time) \
               or (timestamp > end_time):
                continue
            for channel_name in log_rec_dict.keys():
                if channel_name == 'timestamp':
                    continue
                if not self._channels.has_key(channel_name):
                    if not channel_name in removed_channels:
                        msglog.log('fsg:xml_formatter',msglog.types.ERR, \
                                   'Channel %s has been removed from the configuration.' \
                                   % channel_name)
                        removed_channels.append(channel_name)
                    continue
                data_to_send = 1
                self._channels[channel_name]['values'].append((timestamp,log_rec_dict[channel_name],))
        channel_names = self._channels.keys() # it's a list
        # Organize all data from exception log, if any:
        exception_dicts = {} # K:trigger name, V:time-sorted list of  2tuples
                             # (timestamp, message)
        if self._exception_log is not None:
            if self._PDO.exception_log_last_time > start_time:
                start_time = self._PDO.exception_log_last_time + 0.00001 # do not re-send already-sent data
            exception_data = self._exception_log.get_range('timestamp',start_time,end_time)
            for log_rec_dict in exception_data:
                trigger_node_url = log_rec_dict['trigger_node_url']
                trigger_node = as_node(trigger_node_url)
                assert isinstance(trigger_node, FsgComparisonTrigger), \
                       'Node %s should be FsgComparisonTrigger, is %s' \
                       % (trigger_node.name, trigger_node.__class__)
                timestamp = log_rec_dict['timestamp']
                trigger_node_msg = log_rec_dict['trigger_node_msg']
                if not exception_dicts.has_key(trigger_node_url):
                    exception_dicts[trigger_node_url] = {'trigger_node_url':trigger_node_url,'timestamps':[(timestamp,trigger_node_msg,)]}
                else:
                    exception_dicts[trigger_node_url]['timestamps'].append((timestamp,trigger_node_msg,))
                self._PDO.exception_log_last_time = timestamp
                self._PDO.save()
        if (data_to_send == 0) and (len(exception_dicts) == 0):
            msglog.log('fsg:xml_formatter',msglog.types.INFO,'No data or exceptions to send.')
            return None # nothing to send
        # Create an output stream to minimize the combined size of the XML
        # file and the remaining point_dicts contents during formatting:
        stream = StreamWithCallback(self.output_callback)
        stream.set_meta('channel_names',channel_names)
        stream.set_meta('exception_data',exception_dicts.values()) # pass in a list of "values" (dicts), to allow easy iteration
        stream.set_meta('index',0) # number of point time-value lists written to XML output stream
        formatter = SGMLFormatter()
        # Write opening tags:
        formatter.open_tag('data', 
                           info=self.location_info,
                           key=self.location_key
                           )
        formatter.open_tag('device', 
                           info=self.panel_info,
                           key=self.panel_key
                           )
        output = formatter.output()
        self.debug_print(output,None,1)
        stream.write(output)
        stream.set_meta('formatter',formatter)
        stream.set_meta('remaining', '')
        data_mode = 'channels'
        if data_to_send == 0:
            data_mode = 'exceptions' # no data for channels, so skip 'em
        stream.set_meta('data_mode',data_mode)
        return stream
    def output_callback(self, stream):
        remaining = stream.get_meta_value('remaining')
        if remaining:
            count = stream.write(remaining)
            remaining = remaining[count:]
            stream.set_meta('remaining',remaining)
            if remaining:
                return None
            data_mode = stream.get_meta_value('data_mode')
            if data_mode == 'close':
                stream.close()
                return
        del remaining
        index = stream.get_meta_value('index')
        formatter = stream.get_meta_value('formatter')
        data_mode = stream.get_meta_value('data_mode')
        if data_mode == 'channels':
            channel_names = stream.get_meta_value('channel_names')
            for i in range(index,index+5): # try writing 5 channel time-value lists at a time
                channel_dict = {}
                try:
                    channel_dict = self._channels[channel_names[i]]
                except IndexError: # no more data available; move on to exceptions
                    data_mode = 'exceptions'
                    stream.set_meta('data_mode', data_mode)
                    break
                channel_node = channel_dict['channel_node']
                formatter.open_tag('channel',name=channel_node.channel_name,
                                   uom=channel_node.uom,
                                   meastype=channel_node.meastype,
                                   Delta=channel_node.Delta,
                                   Totalized=channel_node.Totalized,
                                   key=channel_node.key,)
                stream.set_meta('index',i+1)
                for timestamp,value in channel_dict['values']:
                    ts = self.parent.time_function(timestamp)
                    ts_str = time.strftime(self.timestamp_format, ts)
                    formatter.open_tag('value',timestamp=ts_str)
                    formatter.add_text(str(value))
                    formatter.close_tag('value')
                formatter.close_tag('channel')
                output = formatter.output()
                self.debug_print(output,None,1)
                channel_dict['values'] = [] # save memory: data is now in stream or remaining
                count = stream.write(output)
                if count != len(output):
                    stream.set_meta('remaining',output[count:])
                    return None
        if data_mode == 'exceptions':
            exception_data = stream.get_meta_value('exception_data')
            for i in range(0,5): # try writing 5 exception lists at a time
                exception_dict = {}
                try:
                    exception_dict = exception_data[i]
                except IndexError: # no more exceptions available; close out XML file
                    try:
                        formatter.close_tag('device')
                        formatter.close_tag('data')
                        output = formatter.output()
                        self.debug_print(output,None,1)
                        count = stream.write(output)
                        if count != len(output):
                            stream.set_meta('remaining',output[count:])
                            data_mode = 'close'
                            stream.set_meta('data_mode', data_mode)
                            return None
                        stream.close()
                        return None
                    except:
                        msglog.exception()
                        return None
                trigger_node_url = exception_dict['trigger_node_url']
                trigger_node = as_node(trigger_node_url)
                formatter.open_tag('exception',name=trigger_node.name,
                                  key=trigger_node.key,)
                stream.set_meta('index',i+1)
                for exc_timestamp,msg in exception_dict['timestamps']:
                    ts = self.parent.time_function(exc_timestamp)
                    ts_str = time.strftime(self.timestamp_format, ts)
                    formatter.open_tag('value',timestamp=ts_str)
                    formatter.add_text(msg)
                    formatter.close_tag('value')
                formatter.close_tag('exception')
                output = formatter.output()
                self.debug_print(output,None,1)
                count = stream.write(output)
                if count != len(output):
                    stream.set_meta('remaining',output[count:])
                    return None
            del exception_data[0:5] # save memory: exception data is now in XML output stream
        return None
    def debug_print(self, msg_fmt_str, msg_value_tuple=None, msg_lvl=1):
        if msg_lvl <= self.debug_lvl:
            if msg_value_tuple is None:
                prn_msg = 'FsgXmlFmttr: ' + msg_fmt_str
            else:
                prn_msg = 'FsgXmlFmttr: ' + (msg_fmt_str % msg_value_tuple)
            print prn_msg
            self.parent.msglog(prn_msg)
        return
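
output_callback() above is built around one idea: write what you can, park the unwritten tail in the stream's 'remaining' metadata, and pick it up on the next callback. A small self-contained sketch of that resume pattern, assuming a stream whose write() may accept fewer bytes than offered (ThrottledStream here is hypothetical, not the real StreamWithCallback API):

class ThrottledStream(object):
    # Accepts at most 8 bytes per write(), like a slow transport.
    def __init__(self):
        self.sent = ''
        self.meta = {'remaining': ''}
    def write(self, data):
        taken = data[:8]
        self.sent += taken
        return len(taken)

def output_callback(stream, produce):
    remaining = stream.meta['remaining']
    if remaining:
        count = stream.write(remaining)
        remaining = remaining[count:]
        stream.meta['remaining'] = remaining
        if remaining:
            return  # still backed up; resume on the next callback
    output = produce()
    count = stream.write(output)
    if count != len(output):
        stream.meta['remaining'] = output[count:]  # park the tail

stream = ThrottledStream()
output_callback(stream, lambda: '<value>42</value>')
output_callback(stream, lambda: '<value>43</value>')
print(stream.sent)  # everything written so far, in order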
Example #23
class ExportersConfigurator(CompositeNode):
    security = SecurityInformation.from_default()
    secured_by(security)
    def __init__(self, *args):
        self._pdo_lock = Lock()
        self.manager = None
        super(ExportersConfigurator, self).__init__(*args)
    def configure(self, config):
        self.setattr('path', config.get('path','/exportconfig'))
        self.setattr('container', config.get('container','/services/Alarm Exporters'))
        self.secured = as_internal_node("/services").secured
        super(ExportersConfigurator, self).configure(config)
    def configuration(self):
        config = super(ExportersConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['container'] = self.getattr('container')
        return config
    def start(self):
        self.container = self.nodespace.as_node(self.container)
        self._pdo_lock.acquire()
        try:
            self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
            self._pdo.exporters = {}
            self._pdo.load()
            exporterdumps = self._pdo.exporters.values()
        finally: 
            self._pdo_lock.release()
        super(ExportersConfigurator, self).start()
        tstart = time.time()
        for exporterdump in exporterdumps:
            IPickles(cPickle.loads(exporterdump))()
        tend = time.time()
        tlapse = tend - tstart
        msglog.log('broadway', msglog.types.INFO,
                   'Exporter Configurator loaded '
                   '%s exporters in %s seconds.' % (len(exporterdumps), tlapse))
        self.manager = self.container
    def stop(self):
        super(ExportersConfigurator, self).stop()
        self.container = None
    def match(self, path):
        return path.startswith(self.path)
    security.protect('create_node', 'Configure')
    def create_node(self, name, config=()):
        config = dict(config)
        config.setdefault("name", name)
        config.setdefault("parent", self.manager)
        exporter = self.manager.nodespace.create_node(AlarmExporter)
        exporter.configure(config)
        exporter.start()
        self.updatepdo()
        return exporter.name
    security.protect('remove_node', 'Configure')
    def remove_node(self, name):
        exporter = self.manager.get_child(name)
        exporter.prune()
        self.updatepdo()
        return exporter.name
    security.protect('configure_node', 'Configure')
    def configure_node(self, name, config):
        exporter = self.manager.get_child(name)
        exporter.configure(config)
        self.updatepdo()
        return exporter.name
    security.protect('node_configuration', 'View')
    def node_configuration(self, name, extended=False):
        exporter = self.manager.get_child(name)
        return exporter.configuration()
    security.protect('configure_formatter', 'Configure')
    def configure_formatter(self, exporter, config):
        return self.configure_node(exporter, {"formatter": config})
    security.protect('formatter_configuration', 'View')
    def formatter_configuration(self, exporter, extended=False):
        return self.node_configuration(exporter).get("formatter", {})
    security.protect('configure_transporter', 'Configure')
    def configure_transporter(self, exporter, config):
        return self.configure_node(exporter, {"transporter": config})
    security.protect('transporter_configuration', 'View')
    def transporter_configuration(self, exporter, extended=False):
        return self.node_configuration(exporter).get("transporter", {})
    security.protect('trigger_configuration', 'View')
    def trigger_configuration(self, name=None):
        manager = self.nodespace.as_node('/services/Alarm Manager')
        sources = [manager] + manager.get_alarms()
        configuration = dict([(source.url, []) for source in sources])
        if name:
            exporter = self.manager.get_child(name)
            configuration.update(exporter.trigger_configuration())
        configs = []
        for source,events in configuration.items():
            configs.append({"source": source, "events": events})
        return configs
    security.protect('configure_triggers', 'Configure')
    def configure_triggers(self, name, triggers=()):
        configuration = {}
        for config in triggers:
            configuration[config["source"]] = config["events"]
        exporter = self.manager.get_child(name)
        exporter.configure_triggers(configuration)
        self.updatepdo()
    security.protect('get_node_names', 'View')
    def get_node_names(self):
        return self.manager.children_names()
    def updatepdo(self):
        exporters = {}
        self._pdo_lock.acquire()
        try:
            for exporter in self.manager.get_exporters():
                exporters[exporter.name] = cPickle.dumps(IPickles(exporter))
            self._pdo.exporters = exporters
            self._pdo.save()
        finally: 
            self._pdo_lock.release()
    def handle_request(self, request):
        update_pdo = False
        response = Response(request)
        request_data = request.get_post_data_as_dictionary()
        request_data.update(request.get_query_string_as_dictionary())
        if request_data.has_key('add'):
            adapt = self.create_exporter("New Exporter")
        elif request_data.has_key('remove'):
            name = urllib.unquote_plus(request_data['remove'][0])
            self.remove_exporter(name)
            adapt = self.container
        elif request_data.has_key('edit'):
            name = urllib.unquote_plus(request_data['edit'][0])
            update_pdo = False
            adapt = self.container.get_exporter(name)
        elif request_data.has_key('configure'):
            name = urllib.unquote_plus(request_data['configure'][0])
            exporter = self.container.get_exporter(name)
            config = {'Exporter': {}, 'Formatter': {}, 'Transporter': {}}
            for attrname in request_data.keys():
                splitname = attrname.split('.')
                if len(splitname) == 2 and config.has_key(splitname[0]):
                    config[splitname[0]][splitname[1]] = urllib.unquote_plus(request_data[attrname][0])
            exportconfig = config['Exporter']
            exportconfig['formatter'] = config['Formatter']
            exportconfig['transporter'] = config['Transporter']
            exporter.configure(exportconfig)
            update_pdo = True
            adapt = exporter
        else: 
            adapt = self.container
        if request_data.has_key('actionName'):
            target = urllib.unquote_plus(request_data.get('target')[0])
            action = urllib.unquote_plus(request_data.get('actionName')[0])
            params = map(urllib.unquote_plus, request_data.get('params'))
            exporter = self.container.get_exporter(target)
            method = getattr(exporter, action)
            result = method(*params)
            update_pdo = True
        if update_pdo:
            self.updatepdo()
        webadapter = IWebContent(adapt)
        response.send(webadapter.render())
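
updatepdo() above shows the persistence idiom used throughout: pickle each live object into a plain dict on the PDO, under a lock, and unpickle on the next start(). A rough standalone sketch of that idiom, with a dict standing in for the PDO and threading.Lock for the framework's Lock (the exporter payload is made up):

import threading
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle             # Python 3

_pdo = {'exporters': {}}      # stand-in for a PersistentDataObject
_pdo_lock = threading.Lock()

def updatepdo(exporters):
    # Re-pickle every exporter so the PDO mirrors the current config.
    dumps = {}
    _pdo_lock.acquire()
    try:
        for name, exporter in exporters.items():
            dumps[name] = pickle.dumps(exporter)
        _pdo['exporters'] = dumps
    finally:
        _pdo_lock.release()

def restore():
    # On restart, rebuild each exporter from its pickled dump.
    return [pickle.loads(dump) for dump in _pdo['exporters'].values()]

updatepdo({'ftp-export': {'host': 'ftp.example.com', 'period': 3600}})
print(restore())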
Example #24
class Control(CompositeNode):
    ##
    # This attribute is used in the introspective generation
    # of configuration data.
    __module__ = mpx.service.control.__name__

    def __init__(self):
        CompositeNode.__init__(self)
        self._status = 'initialized'
        self._stale_apps = []

    def configure(self, config):
        self._pdo = PersistentDataObject(self)
        # Used to detect changes to XML files based on timestamp.
        # Values are tuples of (modify time, pickle string).
        self._pdo.stats_dict = {}
        self._pdo.load()
        # write_priority can be set for the entire control service, though
        # it may be specialized at the individual application node level.
        set_attribute(self, 'write_priority', 9, config, int)
        CompositeNode.configure(self, config)

    def configuration(self):
        config = CompositeNode.configuration(self)
        self.pdo_file = self._pdo.filename()
        get_attribute(self, 'write_priority', config)
        get_attribute(self, 'pdo_file', config, str)
        return config

    def _save_pdo(self):  # no locking needed since load and save cannot overlap
        start_time = time.time()
        self._pdo.save()
        msglog.log(
            self.as_node_url(), msglog.types.INFO,
            'Control service configuration data saved in: %s seconds' %
            (str(time.time() - start_time), ))

    def start(self):
        self._status = 'starting'
        self.stats_dict = {}  #clear out stats dict to force reload of app
        self.application_change_detector(1)  #starting

    def _start(self):
        CompositeNode.start(self)  #get the children ready for a trip...
        #now that the children are started, go back through the list and finish up the "graphical compile"
        for n in self.children_nodes():
            if n.hasattr('map_output_connections'):
                n.map_output_connections()
        for n in self.children_nodes():
            if n.hasattr('map_reference_output_connections'):
                n.map_reference_output_connections()
        for n in self.children_nodes():
            if n.hasattr('resolve_inputs'):
                n.resolve_inputs()
        for n in self.children_nodes():
            if n.hasattr('prepare_run_list'):
                n.prepare_run_list()
        for n in self.children_nodes():
            if n.hasattr('trigger_run_list'):
                n.trigger_run_list()

    def prune_orphaned_schedules(self):
        # remove schedules under /services/time/local/TIM that have no app
        manager = as_node('/services/time/local')
        if manager.has_child('TIM'):
            try:
                sh = as_node('/services/time/local/TIM')
                name_header = 'RZSched_'
                # create list of RZSched_'s under the TIM node
                schedules = filter(
                    lambda k: k[:len(name_header)] == name_header,
                    sh.children_names())
                # compare appname after RZSched_, upto : with our children names
                orphans = filter(
                    lambda k: k.split('_')[1].split(':')[0] not in self.
                    children_names(), schedules)
                for o in orphans:
                    try:
                        sh.get_child(o).prune()
                        msglog.log('Graphical Control:',
                                   'pruned orphaned schedule: ', o)
                    except:
                        msglog.exception()
                if len(orphans):
                    sh.save_schedule()
            except:
                msglog.exception()

    def check_and_load_application_files(self, starting=0):
        app_reloaded = starting  #return value to signal that the children need to be started
        save_pdo = 0  #flag to control saving config data to pdo
        files = os.listdir(config_path)  # /var/mpx/config/services/control (usually)
        xml_filenames = []
        for f in files:
            # any xml file in this folder is assumed to be a control app
            if f.find('.xml') > 0 and len(f) == (f.find('.xml') + 4):
                xml_filenames.append(f)
                modify_time = os.stat(config_path + f)[8]
                stale_pdo = True
                no_stats_pdo = True
                if f in self._pdo.stats_dict:  #check for change since last time
                    no_stats_pdo = False
                    if self._pdo.stats_dict[f][0] == modify_time:
                        stale_pdo = False  #old news, no change detected
                #self.stats_dict[f]=modify_time
                # need to (re)load the application
                if starting or no_stats_pdo or (stale_pdo and ALLOW_APP_RELOAD):
                    if app_reloaded == 0:  #only stop all application nodes for the first detected change
                        try:
                            self._status = 'Stopping %s' % (f, )
                            msglog.log(
                                self.as_node_url(), msglog.types.INFO,
                                'Stage 0:  Stop Application templates.')
                            for c in self.children_nodes():
                                if hasattr(c, '_stop_running_app'):
                                    c._stop_running_app()
                        except:
                            msglog.exception()
                    app_reloaded = 1  #no need to "stop" for any other app changes
                    self._status = 'Loading %s' % (f, )
                    try:
                        root = None
                        if not stale_pdo:  #so no change was detected, we are starting up the framework
                            try:  #to get the pickled config data rather than load the xml again
                                msglog.log(
                                    self.as_node_url(), msglog.types.INFO,
                                    'Stage 1:  XML unchanged.  Loading configuration data from PDO: %s'
                                    % (f, ))
                                root = cPickle.loads(
                                    self._pdo.stats_dict[f][1])
                            except:
                                msglog.exception()
                                msglog.log(
                                    self.as_node_url(), msglog.types.WARN,
                                    'Stage 1:  Unable to reload config data. Next, try XML file.'
                                )
                        if root is None:
                            msglog.log(
                                self.as_node_url(), msglog.types.INFO,
                                'Stage 1:  Parsing configuration xml file: %s'
                                % (f, ))
                            root = parse_xml(config_path + f)
                            self._pdo.stats_dict[f] = (modify_time,
                                                       cPickle.dumps(root))
                            save_pdo = 1
                            if f in self._stale_apps:
                                self._stale_apps.remove(f)
                        #now we have the root configuration.  Turn it into configured nodes
                        module = root.get_config().get('module', None)
                        if module == 'mpx.ion.rz.rzhost_node.RzhostNode':
                            load_rz_root(root, self)
                        elif module == 'mpx.service.control.graphical.ApplicationNode':
                            load_tim_root(root, self)
                        else:
                            raise EInvalidValue()
                    except Exception, e:
                        msglog.exception()
                        pass
                elif stale_pdo:
                    if not f in self._stale_apps:
                        msglog.log(
                            self.as_node_url(), msglog.types.INFO,
                            'Application %s has been modified, please restart the framework.'
                            % (f, ))
                        self._stale_apps.append(f)
                        try:
                            self.get_child(f.split('.')[0]).set_stale_flag()
                        except:
                            msglog.exception()

        # clear out any leftover PDO entries for deleted files
        for k in self._pdo.stats_dict.keys():
            try:
                if k not in xml_filenames:
                    save_pdo = 1  # force save of modified pdo
                    del self._pdo.stats_dict[k]
                    n = k.split('.xml')[0]
                    if self.has_child(n):  # prune any running app whose file was deleted
                        try:
                            self.get_child(n).prune()
                            self.prune_orphaned_schedules()
                        except:
                            msglog.exception()
            except:
                pass
        if save_pdo:
            thread_pool.LOW.queue_noresult(self._save_pdo)
        return app_reloaded
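
The reload logic above hinges on one cache rule: key the PDO entry on the XML file's modification time, and only re-parse when the mtime changes. A compact sketch of that rule, under the assumption that parse() is the expensive step (load_app and parse are illustrative names, not the mpx API):

import os
try:
    import cPickle as pickle
except ImportError:
    import pickle

stats_dict = {}  # filename -> (mtime, pickled parse result); kept on a PDO

def load_app(path, parse):
    f = os.path.basename(path)
    mtime = os.stat(path)[8]  # st_mtime, indexed as in the code above
    if f in stats_dict and stats_dict[f][0] == mtime:
        # Unchanged since last run: skip the XML parse entirely.
        return pickle.loads(stats_dict[f][1])
    root = parse(path)        # new or modified: parse and re-cache
    stats_dict[f] = (mtime, pickle.dumps(root))
    return root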
Example #25
class ExportersConfigurator(CompositeNode):
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        self._pdo_lock = Lock()
        self.manager = None
        super(ExportersConfigurator, self).__init__(*args)

    def configure(self, config):
        self.setattr('path', config.get('path', '/exportconfig'))
        self.setattr('container',
                     config.get('container', '/services/Alarm Exporters'))
        self.secured = as_internal_node("/services").secured
        super(ExportersConfigurator, self).configure(config)

    def configuration(self):
        config = super(ExportersConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['container'] = self.getattr('container')
        return config

    def start(self):
        self.container = self.nodespace.as_node(self.container)
        self._pdo_lock.acquire()
        try:
            self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
            self._pdo.exporters = {}
            self._pdo.load()
            exporterdumps = self._pdo.exporters.values()
        finally:
            self._pdo_lock.release()
        super(ExportersConfigurator, self).start()
        tstart = time.time()
        for exporterdump in exporterdumps:
            IPickles(cPickle.loads(exporterdump))()
        tend = time.time()
        tlapse = tend - tstart
        msglog.log(
            'broadway', msglog.types.INFO, 'Exporter Configurator loaded '
            '%s exporters in %s seconds.' % (len(exporterdumps), tlapse))
        self.manager = self.container

    def stop(self):
        super(ExportersConfigurator, self).stop()
        self.container = None

    def match(self, path):
        return path.startswith(self.path)

    security.protect('create_node', 'Configure')

    def create_node(self, name, config=()):
        config = dict(config)
        config.setdefault("name", name)
        config.setdefault("parent", self.manager)
        exporter = self.manager.nodespace.create_node(AlarmExporter)
        exporter.configure(config)
        exporter.start()
        self.updatepdo()
        return exporter.name

    security.protect('remove_node', 'Configure')

    def remove_node(self, name):
        exporter = self.manager.get_child(name)
        exporter.prune()
        self.updatepdo()
        return exporter.name

    security.protect('configure_node', 'Configure')

    def configure_node(self, name, config):
        exporter = self.manager.get_child(name)
        exporter.configure(config)
        self.updatepdo()
        return exporter.name

    security.protect('node_configuration', 'View')

    def node_configuration(self, name, extended=False):
        exporter = self.manager.get_child(name)
        return exporter.configuration()

    security.protect('configure_formatter', 'Configure')

    def configure_formatter(self, exporter, config):
        return self.configure_node(exporter, {"formatter": config})

    security.protect('formatter_configuration', 'View')

    def formatter_configuration(self, exporter, extended=False):
        return self.node_configuration(exporter).get("formatter", {})

    security.protect('configure_transporter', 'Configure')

    def configure_transporter(self, exporter, config):
        return self.configure_node(exporter, {"transporter": config})

    security.protect('transporter_configuration', 'View')

    def transporter_configuration(self, exporter, extended=False):
        return self.node_configuration(exporter).get("transporter", {})

    security.protect('trigger_configuration', 'View')

    def trigger_configuration(self, name=None):
        manager = self.nodespace.as_node('/services/Alarm Manager')
        sources = [manager] + manager.get_alarms()
        configuration = dict([(source.url, []) for source in sources])
        if name:
            exporter = self.manager.get_child(name)
            configuration.update(exporter.trigger_configuration())
        configs = []
        for source, events in configuration.items():
            configs.append({"source": source, "events": events})
        return configs

    security.protect('configure_triggers', 'Configure')

    def configure_triggers(self, name, triggers=()):
        configuration = {}
        for config in triggers:
            configuration[config["source"]] = config["events"]
        exporter = self.manager.get_child(name)
        exporter.configure_triggers(configuration)
        self.updatepdo()

    security.protect('get_node_names', 'View')

    def get_node_names(self):
        return self.manager.children_names()

    def updatepdo(self):
        exporters = {}
        self._pdo_lock.acquire()
        try:
            for exporter in self.manager.get_exporters():
                exporters[exporter.name] = cPickle.dumps(IPickles(exporter))
            self._pdo.exporters = exporters
            self._pdo.save()
        finally:
            self._pdo_lock.release()

    def handle_request(self, request):
        update_pdo = False
        response = Response(request)
        request_data = request.get_post_data_as_dictionary()
        request_data.update(request.get_query_string_as_dictionary())
        if request_data.has_key('add'):
            adapt = self.create_exporter("New Exporter")
        elif request_data.has_key('remove'):
            name = urllib.unquote_plus(request_data['remove'][0])
            self.remove_exporter(name)
            adapt = self.container
        elif request_data.has_key('edit'):
            name = urllib.unquote_plus(request_data['edit'][0])
            update_pdo = False
            adapt = self.container.get_exporter(name)
        elif request_data.has_key('configure'):
            name = urllib.unquote_plus(request_data['configure'][0])
            exporter = self.container.get_exporter(name)
            config = {'Exporter': {}, 'Formatter': {}, 'Transporter': {}}
            for attrname in request_data.keys():
                splitname = attrname.split('.')
                if len(splitname) == 2 and config.has_key(splitname[0]):
                    config[splitname[0]][splitname[1]] = urllib.unquote_plus(
                        request_data[attrname][0])
            exportconfig = config['Exporter']
            exportconfig['formatter'] = config['Formatter']
            exportconfig['transporter'] = config['Transporter']
            exporter.configure(exportconfig)
            update_pdo = True
            adapt = exporter
        else:
            adapt = self.container
        if request_data.has_key('actionName'):
            target = urllib.unquote_plus(request_data.get('target')[0])
            action = urllib.unquote_plus(request_data.get('actionName')[0])
            params = map(urllib.unquote_plus, request_data.get('params'))
            exporter = self.container.get_exporter(target)
            method = getattr(exporter, action)
            result = method(*params)
            update_pdo = True
        if update_pdo:
            self.updatepdo()
        webadapter = IWebContent(adapt)
        response.send(webadapter.render())
Example #26
class EnergywiseManager(CompositeNode):
    def __init__(self):
        CompositeNode.__init__(self)
        self._pdo_lock = Lock()
        self._pdo = None
        self.__running = False
        self.debug = 0
        return

    def configure(self, config):
        if self.debug:
            msglog.log('EnergywiseManager:', msglog.types.INFO,
                       'Inside configure')
        CompositeNode.configure(self, config)
        set_attribute(self, 'debug', 0, config, int)
        return

    def configuration(self):
        config = CompositeNode.configuration(self)
        get_attribute(self, 'debug', config, str)
        return config

    # def configure_trend_in_switches(self, start_node, frequency):
    #     for child in start_node.children_nodes():
    #         if child.children_nodes():
    #             self.configure_trend_in_switches(child, frequency)
    #         else:
    #             # reached a leaf; each energywise switch has trends as a child
    #             child.new_trend(frequency)
    #     return

    def delete_trend_configuration(self, trend_domain):
        self._pdo_lock.acquire()
        try:
            if self._pdo.trends.has_key(trend_domain):
                # stop logging as well
                del self._pdo.trends[trend_domain]
            self._pdo.save()
        finally:
            self._pdo_lock.release()
        return

    def delete_trends(self, trendList):
        if self.debug:
            msglog.log('EnergywiseManager:', msglog.types.INFO,
                       'Inside delete_trends')

        for domain in trendList.split(':'):
            if domain:
                domain_node = as_node(domain)
                domain_node.delete_trend()
                self.delete_trend_configuration(domain)
        return

    def start(self):
        if self.__running:
            return
        if self.debug:
            msglog.log('EnergywiseManager :', msglog.types.INFO,
                       'Inside start')
        CompositeNode.start(self)
        #        start_node = as_node('/services/EnergywiseManager/')
        #        self.configure_trend_in_switches(start_node, 60)
        self.__running = True
        self._pdo_lock.acquire()
        try:
            self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
            self._pdo.trends = {}
            self._pdo.load()
        finally:
            self._pdo_lock.release()
        if self.has_child('trends'):
            self.trends = self.get_child('trends')
        else:
            self.trends = CompositeNode()
            self.trends.configure({'parent': self, 'name': 'trends'})
            self.trends.start()
        # start trending for saved domains
        for domain, freq in self._pdo.trends.items():
            try:
                start_node = as_node(domain)
                # self.configure_trend_in_switches( start_node,freq )
                start_node.new_trend(freq)
            except:
                self.delete_trend_configuration(domain)
        return

    def get_trends(self):
        return self._pdo.trends.items()

    def add_trend_configuration(self, trend_period, trend_domain):
        self._pdo_lock.acquire()
        try:
            self._pdo.trends[trend_domain] = trend_period
            self._pdo.save()
        finally:
            self._pdo_lock.release()
        return

    def save_trends(self, trend_list):
        '''Adding and saving trends'''
        # Traverse through _pdo.items and check whether the new domain is a
        # subset or a superset of any configured domain.
        # If it is a subset, it is already covered; do not save it.
        # If it is a superset, configure the new one and delete the subsets
        # from _pdo.items.
        for point in reversed(trend_list):
            point_period = point['frequency']
            point_domain = point['domain']
            for saved_domain, saved_period in tuple(self._pdo.trends.items()):
                if saved_domain == point_domain:
                    if saved_period != point_period:
                        self.delete_trend_configuration(saved_domain)
                        break
            if not self._pdo.trends.has_key(point_domain):
                # add this trend
                try:
                    domain_node = as_node(point_domain)
                    if isinstance(domain_node, EnergywiseSwitch) or isinstance(
                            domain_node, EnergywiseDomain):
                        self.add_trend_configuration(point_period,
                                                     point_domain)
                        domain_node.new_trend(point_period)
                except Exception:
                    msglog.exception()
                    msglog.log(
                        "Energywise", msglog.types.ERR,
                        "Failed to create trend for %r every %r seconds" %
                        (point_domain, point_period))
        return

    def stop(self):
        CompositeNode.stop(self)
        self.__running = False
        return
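
EnergywiseManager keeps a single {domain_url: period} dict on its PDO, which is what lets start() re-create every trend after a restart and drop entries whose nodes no longer resolve. A hedged sketch of that restore loop (resolve, start_trend, and the URL are stand-ins for as_node(), new_trend(), and a real domain path):

def restore_trends(trends, resolve, start_trend):
    # trends: the persisted {domain_url: period} mapping.
    for domain, freq in list(trends.items()):
        try:
            node = resolve(domain)
            start_trend(node, freq)
        except Exception:
            del trends[domain]  # stale URL: purge it from the PDO

trends = {'/services/EnergywiseManager/switch1': 60}
restore_trends(trends,
               lambda url: url,          # pretend resolution succeeds
               lambda node, freq: None)  # pretend the trend starts
print(trends)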
Example #27
class GarbageCollector(ServiceNode):
    def __init__(self):
        ServiceNode.__init__(self)
        self.debug = 0
        self._registered = ()
        self._did_save = 0
        self._post_configuration=0

        if self.debug: print 'Initialized _registered to () for %s.' % self
    def singleton_unload_hook(self):
        return
    ##
    # @param config
    # @return None
    def configure(self,config):
        ServiceNode.configure(self,config)
    def configuration(self):
        if self.debug: print 'In GarbageCollector:configuration().'
        config = ServiceNode.configuration(self)
        return config
    ##
    #   starts the garbage collector service
    # @return None
    def start(self):
        from mpx.lib.persistent import PersistentDataObject
        
        ServiceNode.start(self)
        if self.debug: print 'Garbage Collector Starting!'

        self._data = PersistentDataObject(self,dmtype=GC_NEVER)
  
        self._data.registered = []
        self._data.load()
        
        if self.debug: print 'GC Data is %s.' % self._data
        if self.debug: print 'GC Data._reg is %s.' % self._data.registered

    ##
    #   stops the garbage collector service
    # @return None
    def stop(self):
        return ServiceNode.stop(self)

    ##
    # set_faillist is the hook which allows the system to inform the garbage
    # collector about which nodes failed to start up.  Each list item should
    # be a dictionary with the following members:
    # name - the name of the node (without parent information)
    # parent - the parent of the node (with any relevant parent information,
    #          e.g. /services/com1
    # type - what type of failure occurred.  Acceptable values are
    #        load and config.
    def set_faillist(self, faillist):
        if self.debug: print 'Got faillist of %s.' % faillist
        if self.debug: print 'Got reglist of %s.' % self._registered

        old_registered = self._data.registered[:]

        # By now, everyone should have had a chance to start up.
        # @fixme (real soon now, I promise):  Use the cool new method that
        # Mark and Shane suggested to consume an event from the root node
        # when all nodes have been started as a trigger for starting
        # the garbage collection process.
        self._data.registered = list(self._registered)
  
        # OK, now process our lists and see who is naughty and who is
        # nice.
        if self.debug: print '---- Starting to Process Potential Reaping List ----'
        for regnode in old_registered:
            purge_type = regnode['type']
            filename = regnode['filename']
            nodename = regnode['nodename']
            
            # If we are never supposed to purge this data, then don't bother
            # to do any more checking
            if purge_type == GC_NEVER:
                if self.debug: print '%s: Skipping because it is GC_NEVER.' % nodename
                continue

            if self.debug: print '%s: Checking.' % nodename
            
            node_did_register = 0
            node_did_fail = 0
            node_did_fail_on_config = 0
            node_did_fail_on_load = 0
            node_did_fail_on_start = 0
            parent_did_fail = 0
            should_purge = 0

            # If this node has registered with us, then we assume that
            # it started up and is present, etc.  This might not always
            # be the correct thing to do, but for now it seems like the
            # correct enough thing to do and should keep performance
            # from becoming an issue.
            if regnode in self._registered:
                if self.debug: print '%s: Appears to be happy.' % nodename
                node_did_register = 1
            else:
                # Check to see if the node or one of its ancestors failed
                for failnode in faillist:
                    curnode = failnode['name']
                    curpare = failnode['parent']
                    curtype = failnode['type']
                    if curpare == '/':
                        curpath = curpare + curnode
                    else:
                        curpath = curpare + '/' + curnode
                    if self.debug: print 'curpath is %s and nodename is %s.' % (curpath, nodename)
                    if nodename == curpath:
                        if self.debug: print 'We got a match, %s failed because of %s.' % (
                            nodename, curtype)
                        if curtype == 'load':
                            node_did_fail_on_load = 1
                        elif curtype == 'config':
                            node_did_fail_on_config = 1
                        else:
                            raise ValueError('Unrecognized failure type: %s.' % curtype)
                        # Don't need to check any further
                        break
                    else:
                        if self._path_is_parent(curpath, nodename):
                            if self.debug: print 'We found a parent who failed: %s.' % curpath
                            parent_did_fail = 1
                            # Don't need to check any further
                            break                        
                if node_did_fail_on_config or node_did_fail_on_load:
                    node_did_fail = 1

            # If the node didn't fail in load or config, but it didn't register either,
            # then check to see if perhaps it exists, but didn't start.  We detect this
            # by doing an as_node on it.  If this succeeds, we can check the node's state.
            # If it doesn't succeed, then we can pretty safely assume that the node
            # has been deleted (or, unfortunately, is auto-discovered).
            if not node_did_fail and not node_did_register:
                try:
                    x = as_node(nodename)
                    node_did_fail_on_start = 1
                    node_did_fail = 1
                    if self.debug: print 'We were able to instantiate node: %s.' % nodename
                except:
                    if self.debug: print 'Failed to instantiate node: %s.' % nodename
                    # The node doesn't seem to exist at all.  Let the following code
                    # draw the appropriate conclusions.
                    pass
            
            if not node_did_register:
                if self.debug: print 'node_did_fail_on_load: %d.' % node_did_fail_on_load
                if self.debug: print 'node_did_fail_on_config: %d.' % node_did_fail_on_config
                if self.debug: print 'node_did_fail_on_start: %d.' % node_did_fail_on_start
                if self.debug: print 'node_did_fail: %d.' % node_did_fail
                if self.debug: print 'parent_did_fail: %d.' % parent_did_fail
                if self.debug: print 'purge_type: %d.' % purge_type
                
                # OK, the node didn't register.  Check to see what we've
                # been told to do in this case.
                if node_did_fail and (purge_type == GC_ONFAILURE):
                    should_purge = 1

                # For now, purge even if it was a parent who failed and purge_type
                # is GC_ONFAILURE.  @fixme: We need to think about if this is what
                # we want to do or not.
                if parent_did_fail and (purge_type == GC_ONFAILURE):
                    should_purge = 1

                # If the node did not register and neither it nor a parent
                # failed to start, then we assume that it has been deleted.
                # Note: This does not seem to be correct for auto-discovered
                #       nodes, so we need a better way of detecting this case.
                if (not node_did_fail) and (not parent_did_fail) and (purge_type == GC_ONDELETE):
                    should_purge = 1

            # If the node did not register and we aren't going to purge it, then
            # save its registration information so that if circumstances change,
            # we can consider purging it at some later date.
            if (not node_did_register) and (not should_purge):
                if self.debug: print '%s did not register, but we are registering for it.' % nodename
                self._data.registered.append(regnode)
                        
            # OK, we've figured out that we should purge this persistent
            # data.  Go ahead and do so.
            if should_purge:
                if os.access(filename, os.F_OK):
                    if self.debug: print 'We decided we should purge the following file: %s.' % filename
                    msglog.log('garbage_collector',msglog.types.INFO,
                           'Purging the following persistent data file: %s on behalf of %s.' % (filename,
                                                                                                nodename))
                    try:
                        os.remove(filename)
                    except:
                        msglog.log('garbage_collector',msglog.types.INFO,
                                   'Got exception trying to remove persistent data: %s.' % filename)
                        msglog.exception('garbage_collector')
            else:
                if self.debug: print '%s: Will SAVE the following file: %s.' % (nodename, filename)


        if self.debug: print '---- Done Processing Potential Reaping List ----'
        
        # Now, at long last, persist our own data.
        self._data.save()
        self._did_save = 1
        self._post_configuration=1

    def register(self, nodename, filename, type=None):
        # Default to GC_ONDELETE
        if type == None:
            type = GC_ONDELETE
        if self.debug: print '%s: Registered with type of %d.' % (nodename, type)
        if self._post_configuration:
            self._data.registered+=(
                {'nodename':nodename, 'filename':filename, 'type':type},
                )
        else:
            self._registered += (
                {'nodename':nodename, 'filename':filename, 'type':type},
                )

        # If we have already saved our data, but just received a new registration,
        # then save it again.
        if self._did_save:
            self._data.save()

    def _path_is_parent(self, path, node):
        # If they are the same, we aren't talking a parent/child relationship here
        if path == node:
            return 0
        strind = string.find(node, path)
        if strind == -1:
            return 0
        if strind == 0:
            return 1
        # If we got something other than -1 or 0 here, strange things are
        # happening.  Dump a message to msglog so that whatever is wrong
        # can be fixed.
        msglog.log(
            'garbage_collector',msglog.types.INFO,
            '_path_is_parent: Found %s at a weird spot in %s.' % (node,
                                                                  path)
            )
        return 1
    ##
    # Return a tuple of dict()s describing all the registered PDOs.
    # @note DO NOT MODIFY THE DICT()s IN THE TUPLE!
    def registered_pdo_tuple(self):
        return self._registered
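
The long set_faillist() walk above boils down to a small decision over (registered, failed, purge_type). A hedged distillation of that decision; the constant values are assumptions, since only the names GC_NEVER, GC_ONDELETE, and GC_ONFAILURE appear in the source:

GC_NEVER, GC_ONDELETE, GC_ONFAILURE = 0, 1, 2  # assumed values

def should_purge(purge_type, registered, node_failed, parent_failed):
    if purge_type == GC_NEVER or registered:
        return 0  # never purge, or the node is alive and well
    if (node_failed or parent_failed) and purge_type == GC_ONFAILURE:
        return 1
    if not node_failed and not parent_failed and purge_type == GC_ONDELETE:
        return 1  # never registered and nothing failed: assume deleted
    return 0

# A node that vanished without any recorded failure gets reaped:
print(should_purge(GC_ONDELETE, registered=0, node_failed=0, parent_failed=0))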
Example #28
class Control(CompositeNode):
    ##
    # This attribute is used in the introspective generation
    # of configuration data.
    __module__ = mpx.service.control.__name__
    
    def __init__(self):
        CompositeNode.__init__(self)
        self._status = 'initialized'
        self._stale_apps = []
    def configure(self, config):
        self._pdo = PersistentDataObject(self)
        self._pdo.stats_dict = {} #used to detect changes to xml files based on timestamp. Tuple (modify time, pickle string)
        self._pdo.load()
        # write_priority can be set for the entire control service, though
        # it may be specialized at the individual application node level. 
        set_attribute(self, 'write_priority', 9, config, int)
        CompositeNode.configure(self, config)
    def configuration(self):
        config = CompositeNode.configuration(self)
        self.pdo_file = self._pdo.filename()
        get_attribute(self, 'write_priority', config)
        get_attribute(self, 'pdo_file', config, str)
        return config
    def _save_pdo(self): #no locking needed since the load and save cannot overlap
        start_time = time.time()
        self._pdo.save()
        msglog.log(self.as_node_url(),msglog.types.INFO,
                   'Control service configuration data saved in: %s seconds' % (str(time.time() - start_time),))
    def start(self):
        self._status = 'starting'
        self.stats_dict = {} #clear out stats dict to force reload of app
        self.application_change_detector(1) #starting
    def _start(self):
        CompositeNode.start(self) #get the children ready for a trip...
        #now that the children are started, go back through the list and finish up the "graphical compile"
        for n in self.children_nodes():
            if n.hasattr('map_output_connections'):
                n.map_output_connections()
        for n in self.children_nodes():
            if n.hasattr('map_reference_output_connections'):
                n.map_reference_output_connections()
        for n in self.children_nodes():
            if n.hasattr('resolve_inputs'):
                n.resolve_inputs()
        for n in self.children_nodes():
            if n.hasattr('prepare_run_list'):
                n.prepare_run_list()
        for n in self.children_nodes():
            if n.hasattr('trigger_run_list'):
                n.trigger_run_list()
    def prune_orphaned_schedules(self):
        # remove schedules under /services/time/local/TIM that have no app
        manager = as_node('/services/time/local')
        if manager.has_child('TIM'):
            try:
                sh = as_node('/services/time/local/TIM')
                name_header = 'RZSched_'
                # create list of RZSched_'s under the TIM node
                schedules = filter(lambda k:k[:len(name_header)] == name_header, sh.children_names())
                # compare appname after RZSched_, upto : with our children names
                orphans = filter(lambda k:k.split('_')[1].split(':')[0] not in self.children_names(), schedules)
                for o in orphans:
                    try:
                        sh.get_child(o).prune()
                        msglog.log('Graphical Control:', 'pruned orphaned schedule: ', o)
                    except:
                        msglog.exception()
                if len(orphans):
                    sh.save_schedule()
            except:
                msglog.exception()

    def check_and_load_application_files(self, starting=0):
        app_reloaded = starting #return value to signal that the children need to be started
        save_pdo = 0 #flag to control saving config data to pdo
        files = os.listdir(config_path) #/var/mpx/config/services/control (usually)
        xml_filenames = []
        for f in files:
            if f.find('.xml') > 0 and len(f) == (f.find('.xml') + 4): #any xml file in this folder is assumed to be a control app
                xml_filenames.append(f)
                modify_time = os.stat(config_path + f)[8]
                stale_pdo = True
                no_stats_pdo = True
                if f in self._pdo.stats_dict: #check for change since last time
                    no_stats_pdo = False
                    if self._pdo.stats_dict[f][0] == modify_time:
                        stale_pdo = False #old news, no change detected
                #self.stats_dict[f]=modify_time
                if starting or no_stats_pdo or (stale_pdo and ALLOW_APP_RELOAD): #need to (re)load application
                    if app_reloaded == 0: #only stop all application nodes for the first detected change
                        try:
                            self._status = 'Stopping %s' % (f,)
                            msglog.log(self.as_node_url(),msglog.types.INFO,
                                'Stage 0:  Stop Application templates.')
                            for c in self.children_nodes():
                                if hasattr(c, '_stop_running_app'):
                                    c._stop_running_app()
                        except:
                            msglog.exception()
                    app_reloaded = 1 #no need to "stop" for any other app changes
                    self._status = 'Loading %s' % (f,)
                    try:
                        root = None
                        if not stale_pdo: #no change detected, so the framework is starting up
                            try: #prefer the pickled config data over re-parsing the xml
                                msglog.log(self.as_node_url(),msglog.types.INFO,
                                       'Stage 1:  XML unchanged.  Loading configuration data from PDO: %s' % (f,))
                                root = cPickle.loads(self._pdo.stats_dict[f][1])
                            except:
                                msglog.exception()
                                msglog.log(self.as_node_url(),msglog.types.WARN,
                                       'Stage 1:  Unable to reload config data. Next, try XML file.')
                        if root is None:
                            msglog.log(self.as_node_url(),msglog.types.INFO,
                                       'Stage 1:  Parsing configuration xml file: %s' % (f,))
                            root = parse_xml(config_path + f)
                            self._pdo.stats_dict[f] = (modify_time, cPickle.dumps(root))
                            save_pdo = 1
                            if f in self._stale_apps:
                                self._stale_apps.remove(f)
                        #now we have the root configuration.  Turn it into configured nodes
                        module = root.get_config().get('module', None)
                        if module == 'mpx.ion.rz.rzhost_node.RzhostNode':
                            load_rz_root(root, self)
                        elif module == 'mpx.service.control.graphical.ApplicationNode':
                            load_tim_root(root, self)
                        else:
                            raise EInvalidValue()
                    except Exception: #log the failure and continue with the next application file
                        msglog.exception()
                elif stale_pdo:
                    if not f in self._stale_apps:
                        msglog.log(self.as_node_url(), msglog.types.INFO,
                                   'Application %s has been modified, please restart the framework.' % (f,))
                        self._stale_apps.append(f)
                        try:
                            self.get_child(f.split('.')[0]).set_stale_flag()
                        except:
                            msglog.exception()
                            
        #clear out any leftover pdo entries for deleted files
        for k in self._pdo.stats_dict.keys():
            try:
                if k not in xml_filenames:
                    save_pdo = 1 #force save of modified pdo
                    del self._pdo.stats_dict[k]
                    n = k.split('.xml')[0]
                    if self.has_child(n): # prune any running app whose file was deleted
                        try:
                            self.get_child(n).prune()
                            self.prune_orphaned_schedules()
                        except:
                            msglog.exception()
            except: #ignore errors while cleaning up; continue with the remaining entries
                pass
        if save_pdo:
            thread_pool.LOW.queue_noresult(self._save_pdo)
        return app_reloaded
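
The method above combines two reusable patterns: probing children for optional hooks (as in _start) and caching a parsed configuration in a pickle keyed by the file's modification time. The following is a minimal, self-contained sketch of both, written in Python 2 to match the listing; run_stage, load_config, and parse are hypothetical names used for illustration, not part of the framework.

import os
import cPickle

def run_stage(nodes, hook_name):
    # invoke an optional per-node hook only on the nodes that implement it
    for n in nodes:
        hook = getattr(n, hook_name, None)
        if hook is not None:
            hook()

_cache = {} # filename -> (mtime, pickled parse tree)

def load_config(path, parse):
    # parse is a hypothetical callable turning an XML file into a config tree
    mtime = os.stat(path)[8] # stat index 8 is st_mtime, as in the listing
    entry = _cache.get(path)
    if entry is not None and entry[0] == mtime:
        # file unchanged since the last parse: reuse the pickled result
        return cPickle.loads(entry[1])
    root = parse(path)
    # remember the parsed tree against the current modification time
    _cache[path] = (mtime, cPickle.dumps(root))
    return root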