Example #1
0
class VirtualSubNet:
    def __init__(self, owner):
        self._lock = Lock()
        self._thread = None
        self.owner = owner
        return

    def start(self):
        self._lock.acquire()
        try:
            if not self._thread:
                t = VirtualSubNetThread(self.owner)
                t.start()
                # Add interlock for successful start-up... (wait on Q for
                # 10 seconds).
                self._thread = t
        finally:
            self._lock.release()
        return

    def stop(self):
        self._lock.acquire()
        try:
            if self._thread:
                self._thread.stop()
                self._thread = None
        finally:
            self._lock.release()
        return
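The start() above only notes its start-up interlock in a comment. A minimal sketch of one way to add it, assuming VirtualSubNetThread exposed a hypothetical startup_queue that the thread puts a token on once it is running; the attribute name, timeout handling, and exception are illustrative, not part of the original class:

from Queue import Empty  # Python 2 Queue module

    def start(self):
        self._lock.acquire()
        try:
            if not self._thread:
                t = VirtualSubNetThread(self.owner)
                t.start()
                try:
                    # Interlock: wait up to 10 seconds for the thread to
                    # signal a successful start-up on its (hypothetical)
                    # startup_queue.
                    t.startup_queue.get(True, 10)
                except Empty:
                    t.stop()
                    raise RuntimeError('VirtualSubNetThread failed to start')
                self._thread = t
        finally:
            self._lock.release()
        return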
Example #2
0
class XCommandIface(TcpConnection):
    def __init__(self, port, host, debug):
        self.__lock = Lock() # lock serializes XCommandIface messaging
        super(XCommandIface, self).__init__(port, host, debug)
        return
        
    def write(self, method_name, params):
        self.__lock.acquire()
        try:
            if not self.connection_ok():
                self.open_connection()
            # marshal data from param tuple
            data = xmlrpclib.dumps(tuple([params]), method_name)
            # payload is a 4-byte, little-endian length field for the
            # xml data, followed by the data itself
            msg = struct.pack('<I', len(data)) + data
            try:
                self._s.send(msg)
            except:
                msglog.log('Adura', ERR, 'Error writing to XCommand socket.')
                raise EConnectionError
            rslt = self.read()
            return rslt
        finally:
            self.close_connection()
            self.__lock.release()
            
    def read(self):
        # leading 4 bytes indicate the length of the xml-rpc response payload
        read_len = struct.unpack('<I', self._s.recv(4, timeout=SOCK_OP_TIMEOUT))[0]
        # retrieve and unmarshal the results.  If the xml-rpc packet represents a
        # fault condition, loads() raises a Fault exception.  @fixme - need a 
        # better understanding of their normal result structure
        rslt = xmlrpclib.loads(self._s.recv(read_len, timeout=SOCK_OP_TIMEOUT))[0]
        return rslt
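The framing described in the comments above (a 4-byte little-endian length prefix followed by the xml-rpc payload) can be exercised on its own. A small sketch with a made-up method name; only struct and xmlrpclib from the standard library are assumed:

import struct
import xmlrpclib

data = xmlrpclib.dumps((42,), 'get_status')   # illustrative method name
msg = struct.pack('<I', len(data)) + data     # length prefix + payload
# The receiver peels off the 4-byte prefix, then reads that many bytes.
length = struct.unpack('<I', msg[:4])[0]
assert msg[4:4 + length] == data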
Example #3
0
class SocketMap(dict):
    def __init__(self, *args, **kw):
        dict.__init__(self, *args, **kw)
        self.__busy = False
        self.__lock = Lock()
        # The StubNotifier is used during the creation of the real
        # SocketMapNotifier().
        self.__notifier = StubNotifier()
        # Creating the SocketMapNotifier will add it to this SocketMap.
        self.__notifier = SocketMapNotifier(self)
        return
    def wakeup(self, force=False):
        self.__lock.acquire()
        force = force or self.__busy
        try:
            if force:
                self.__notifier.wakeup()
        finally:
            self.__lock.release()
        return
    def __delitem__(self,y):
        self.__lock.acquire()
        try:
            result = dict.__delitem__(self,y)
            if self.__busy:
                self.__notifier.wakeup()
            return result
        finally:
            self.__lock.release()
        raise EUnreachable()
    def __setitem__(self,i,y):
        self.__lock.acquire()
        try:
            result = dict.__setitem__(self,i,y)
            if self.__busy:
                self.__notifier.wakeup()
            return result
        finally:
            self.__lock.release()
        raise EUnreachable()
    def __invoke(self, func, *args):
        self.__lock.acquire()
        self.__busy = True
        self.__lock.release()
        try:
            result = func(*args)
        finally:
            self.__lock.acquire()
            self.__busy = False
            self.__lock.release()
        return result
    def poll(self, timeout=0.0):
        return self.__invoke(asyncore.poll, timeout, self)
    def poll2(self, timeout=0.0):
        return self.__invoke(asyncore.poll2, timeout, self)
    def poll3(self, timeout=0.0): 
        return self.__invoke(asyncore.poll3, timeout, self)
    def loop(self, timeout=30.0, use_poll=0):
        return self.__invoke(asyncore.loop, timeout, use_poll, self)
Example #4
0
class CANBus(ARMNode, AutoDiscoveredNode):

    def __init__(self):
        ARMNode.__init__(self)
        AutoDiscoveredNode.__init__(self)
        self._lock = Lock()
        self.conversion_list = {}
        self._queue = Queue()
        self.debug = 0
        self.running = 0
        self._start_called = 0
        self.devices = ''
        self.device_addresses = []
        self._been_discovered = 0

    def lock(self):
        self._lock.acquire()
    def unlock(self):
        self._lock.release()

    ##
    # @see node.ARMNode#configure
    #
    def configure(self,config):
        ARMNode.configure(self,config)

    
    def configuration(self):
        config = ARMNode.configuration(self)
        #get_attribute(self, 'devices', config)
        return config
        

    ##
    # start temperature conversions
    #
    def start(self):
        ARMNode.start(self)
        self.running = 0
    
    def stop(self):
        self.running = 0

    ##
    # discover and create object instances
    #
    def _discover_children(self, force=0):
        if force:
            self._been_discovered = 0
        if self.running == 1 and not self._been_discovered:
            pass
        return self._nascent_children
Example #5
0
class ExplicitSocketMap(dict):
    def __init__(self, *args, **kw):
        dict.__init__(self, *args, **kw)
        self.__lock = Lock()
        self.__notifier = SocketMapNotifier(self)
        return
    def wakeup(self):
        self.__lock.acquire()
        try:
            self.__notifier.wakeup()
        finally:
            self.__lock.release()
        return
Example #6
0
class UniqueID(PersistentDataObject):
    def __init__(self,node):
        self._lock = Lock()
        self.id = 0
        PersistentDataObject.__init__(self,node)
        self.load()
    def allocate_id(self):
        self._lock.acquire()
        try:
            id = self.id
            self.id += 1
            self.save('id')
        finally:
            self._lock.release()
        return id
Example #7
0
class _Lock:
    def __init__(self):
        self._minutes = 0
        self._lock = Lock()
        self._scheduled = None
        self._stack = None
    def acquire(self,blocking=0):        
        value = self._lock.acquire(blocking)
        self._stack = traceback.extract_stack()
        self._schedule_print()
        return value
    def release(self):
        try:
            if self._scheduled:
                self._scheduled.cancel()
        finally:
            self._lock.release()
    def locked(self):
        return self._lock.locked()
    def _schedule_print(self):
        self._scheduled = scheduler.after(60,self._print,(self._stack,))
    def _print(self,stack):
        self._minutes += 1
        print 'Lock acquired: %s min' % self._minutes
        print string.join(traceback.format_list(stack))
        if self.locked():
            self._schedule_print()                 
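_Lock above is a diagnostic stand-in for Lock: once acquired, it schedules a report for every minute the lock stays held, printing the elapsed time and the stack that took it. A brief usage sketch; do_long_running_work() is a hypothetical placeholder:

lock = _Lock()
lock.acquire(1)              # blocking acquire; records the acquiring stack
try:
    do_long_running_work()   # if this runs for minutes, _print() reports
                             # the held lock once per minute
finally:
    lock.release()           # cancels any pending report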
Example #8
0
 class SSL:
     def __init__(self,socket,*args):
         if not isinstance(socket,_Socket):
             raise EInvalidValue('socket',socket,
                                 'Must be mpx.lib.socket.socket')
         self._safety = 0
         if isinstance(socket,_SafetySocket):
             self._safety = 1
         self._socket = socket
         self._ssl = _original_ssl(socket._socket,*args)
         self._lock = Lock()
     def read(self,count=None,timeout=None):
         args = (count,)
         if count is None:
             args = ()
         if ((self._safety or timeout is not None) and 
             not self._socket._wait_readable(0)):
             blocking = self._socket.is_blocking()
             self._lock.acquire()
             try:
                 self._socket.setblocking(0)
                 try:
                     return self._ssl.read(*args)
                 except sslerror,why:
                     if why[0] not in (2,11) and blocking:
                         raise why
             finally:
                 self._socket.setblocking(blocking)
                 self._lock.release()
         else:
             return self._ssl.read(*args)
         if (self._socket._connected and not 
             self._socket._wait_readable(timeout)):
             raise ETimeout('Socket did not become readable')
         return self._ssl.read(*args)
     def write(self,data,timeout=None):
         if ((timeout is not None or self._safety) and 
             self._socket._connected):
             if not self._socket._wait_writable(timeout):
                 raise ETimeout('Socket did not become writable')
         self._lock.acquire()
         try:
             return self._ssl.write(data)
         finally:
             self._lock.release()
     def __getattr__(self,name):
         return getattr(self._ssl,name)
Example #9
0
class TunnelManager(CompositeNode):
    def __init__(self, *args):
        global PTY_DEVS
        self._lock = Lock()
        self._pty_devs = []
        self._ptys_allocated = 0
        module_lock.acquire()
        try:
            if PTY_DEVS is None:
                PTY_DEVS = []
                for major in 'wxyz':
                    for minor in '0123456789abcdef':
                        PTY_DEVS.append('/dev/pty%s%s' % (major, minor))
        finally:
            module_lock.release()
            
    def configure(self, config):
        # vcp_limit is a "hidden" attribute.
        set_attribute(self, 'vcp_limit', 64, config, int)
        CompositeNode.configure(self, config)
        
    def configuration(self):
        config = CompositeNode.configuration(self)
        get_attribute(self, 'vcp_limit', config, str)
        return config
    
    ##
    # Allocate a pseudo-terminal for use by the Port object.
    #
    # @return a string, ie. /dev/ptyr0. 
    def get_pty(self):
        global PTY_DEVS
        self._lock.acquire()
        try:
            while len(PTY_DEVS):
                pty = PTY_DEVS.pop()
                try:
                    # confirm that the pty is accessible.
                    fd = open(pty)
                    fd.close()
                    self._ptys_allocated += 1
                    return pty
                except:
                    pass
            raise EResourceError
        finally:
            self._lock.release()
Example #10
0
 class Consumer(EventConsumerAbstract):
     def __init__(self, *args, **kw):
         EventConsumerAbstract.__init__(self, *args, **kw)
         self.entries = []
         self.errors = []
         self.lock = Lock()
     def event_thread(self,event):
         # The values returned in the event:
         values = event.values
         # The column as read from the source Log instance:
         column_dict = event.source[event.seq]
         # A map of COLUMN_DICT keys to VALUES indexes.
         column_value_map = {
             'timestamp':0, 'reverse':1, 'c2':2, 'c3':3
             }
         # Validate that the list of values matches the actual column in
         # the log:
         for key,index in column_value_map.items():
             if not column_dict.has_key(key):
                 self.errors.append('column_dict has no %r key.' % key)
                 return
             if index >= len(values):
                 self.errors.append('Index(%r) >= len(values:%r).' %
                                    (index, len(values)))
                 return
             if column_dict[key] != values[index]:
                 self.errors.append(
                     'column_dict[%r]:%r != values[%r]:%r' % (
                     key, column_dict[key], index, values[index]))
                 return
         self.lock.acquire()
         try:
             # If any entries are left, the test will fail.
             self.entries.remove(values)
         except:
             # Also, if errors is not empty the test will fail.
             self.errors.append("Failed to find %r in entries." %
                                values)
         self.lock.release()
     def event_handler(self,event):
         t = Thread(target=self.event_thread, args=(event,))
         t.start()
         return
Example #11
0
class KwList(CircList):
    def __init__(self, length):
        super(KwList, self).__init__(length)
        self.__last_ts = time.time()
        self.__last_kwh = None
        self.__lock = Lock()
        return
        
    def add(self, kwh, ts):
        try:
            kwh = float(kwh)
        except:
            return
        if self.__last_kwh is None:
            self.__last_kwh = kwh
        self.__lock.acquire()
        try:
            if kwh < self.__last_kwh or ts < self.__last_ts:
                # either time shifted on us or our kwh rolled/reset.
                self.clear()
            self.__last_ts = ts
            self.__last_kwh = kwh
            self.append(KwEntry(kwh, ts))
        finally:
            self.__lock.release()
        return

    def moving_average(self):
        avg = None
        self.__lock.acquire()
        try:
            if len(self) >= 2:
                s_kw_entry = self._data[0]
                e_kw_entry = self._data[-1]
                delta_s = e_kw_entry.get_ts() - s_kw_entry.get_ts()
                delta_kwh = e_kw_entry.get_kwh() - s_kw_entry.get_kwh()
                avg = delta_kwh / self._seconds_as_hours(delta_s)
        finally:
            self.__lock.release()
        return avg

    def _seconds_as_hours(self, seconds):
        return (seconds / 60.0) / 60.0
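A brief usage sketch of the class above, assuming KwEntry and CircList behave as moving_average() implies (oldest entry at _data[0], __len__ defined). Two samples 15 minutes apart with 5 kWh consumed give 5 / 0.25 = 20 kW:

import time

kw = KwList(120)
now = time.time()
kw.add(1000.0, now)          # first kWh reading
kw.add(1005.0, now + 900)    # 5 kWh later, 15 minutes on
print kw.moving_average()    # 5 kWh / 0.25 h -> 20.0 kW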
Example #12
0
class CachedValue(CompositeNode):
    def __init__(self):
        self._lock = Lock()
        self._value = _Value()
        CompositeNode.__init__(self)
    def configure(self,config):
        CompositeNode.configure(self,config)
        set_attribute(self, 'expires_after', 0, config, float)
        set_attribute(self, 'node', self.parent, config, as_node)
    def configuration(self):
        config = CompositeNode.configuration(self)
        get_attribute(self, 'expires_after', config, str)
        get_attribute(self, 'node', config, as_node_url)
        return config
    def get(self, skipCache=0):
        self._lock.acquire()
        try:
            if self._value.age() > self.expires_after:
                self._value.set(self.node.get())
        finally:
            self._lock.release()
        return self._value.get()
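The _Value helper used by CachedValue is not shown. A minimal sketch of what such a helper might look like, assuming it only needs to remember the cached value and when it was set so that age() can be compared against expires_after (an unset value reports a very large age, forcing the first get() to refresh):

import time

class _Value:
    # Hypothetical helper: holds a cached value plus the time it was set.
    def __init__(self):
        self._value = None
        self._set_at = 0.0
    def set(self, value):
        self._value = value
        self._set_at = time.time()
    def get(self):
        return self._value
    def age(self):
        # seconds since the last set(); huge when never set
        return time.time() - self._set_at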
Example #13
0
class mytime(object):
    def __init__(self, systime):
        self._skewlock = Lock()
        self._skewdetected = Event()
        self.base_time_time = systime
        self.base_uptime_time = uptime.secs()
    def notify_detected(self):
        self._skewlock.acquire()
        self._skewdetected.set()
        self._skewlock.release()
    def await_detection(self, timeout = None):
        self._skewlock.acquire()
        self._skewdetected.clear()
        self._skewlock.release()
        return self._skewdetected.wait(timeout)
    def time(self):
        dtime = uptime.secs() - self.base_uptime_time
        comp_time = dtime + self.base_time_time
        if debug:
            print 'mytime.time() returning %f.' % comp_time
        return comp_time
    def skew_base_time(self, skew):
        self.base_time_time = self.base_time_time + skew
Example #14
0
class AlarmLogger(ServiceNode):
    def __init__(self):
        self.__lock = Lock()
        ServiceNode.__init__(self)
    def configure(self,config):
        ServiceNode.configure(self,config)
        set_attribute(self,'log',REQUIRED,config)
    def configuration(self):
        config = ServiceNode.configuration(self)
        get_attribute(self,'log',config,as_node_url)
        return config
    def start(self):
        self.log = as_node(self.log)
        ServiceNode.start(self)
    def stop(self):
        self.log = as_node_url(self.log)
        ServiceNode.stop(self)
    def export(self,alarm):
        self.__lock.acquire()
        try:
            self.log.add_entry([time.time(),alarm.source.name,alarm.timestamp,
                                alarm.critical,alarm.values,alarm.message])
        finally:
            self.__lock.release()
Example #15
0
class XCommandIface(TcpConnection):
    def __init__(self, port, host, debug):
        self.__lock = Lock()  # lock serializes XCommandIface messaging
        super(XCommandIface, self).__init__(port, host, debug)
        return

    def write(self, method_name, params):
        self.__lock.acquire()
        try:
            if not self.connection_ok():
                self.open_connection()
            # marshal data from param tuple
            data = xmlrpclib.dumps(tuple([params]), method_name)
            # payload is a 4-byte, little-endian length field for the
            # xml data, followed by the data itself
            msg = struct.pack('<I', len(data)) + data
            try:
                self._s.send(msg)
            except:
                msglog.log('Adura', ERR, 'Error writing to XCommand socket.')
                raise EConnectionError
            rslt = self.read()
            return rslt
        finally:
            self.close_connection()
            self.__lock.release()

    def read(self):
        # leading 4 bytes indicate the length of the xml-rpc response payload
        read_len = struct.unpack('<I',
                                 self._s.recv(4, timeout=SOCK_OP_TIMEOUT))[0]
        # retrieve and unmarshal the results.  If the xml-rpc packet represents a
        # fault condition, loads() raises a Fault exception.  @fixme - need a
        # better understanding of their normal result structure
        rslt = xmlrpclib.loads(self._s.recv(read_len,
                                            timeout=SOCK_OP_TIMEOUT))[0]
        return rslt
Example #16
0
    class _CriticalData:
        def __init__(self):
            self._lock = Lock()
            self._cond = Condition(self._lock)
            self.state = ConnectionMixin.IDLE
            self.connection_count = 0
        
        def acquire(self):
            return self._lock.acquire()
        
        def release(self):
            return self._lock.release()
        
        def wait(self, timeout=None):
            if timeout != None:
                self._cond.wait(timeout)
            else:
                self._cond.wait()
                
        def notify(self):
            self._cond.notify()

        def increment_connection_count(self):                        
            self.connection_count += 1
            self.state = ConnectionMixin.UP
            return self.connection_count
    
        def decrement_connection_count(self):
            self.connection_count -= 1
            if self.connection_count < 1:
                self.state = ConnectionMixin.DOWN
            
        def get_state(self):
            return self.state
        
        def set_state(self,state):
            self.state = state
Example #17
0
    class _CriticalData:
        def __init__(self):
            self._lock = Lock()
            self._cond = Condition(self._lock)
            self.state = ConnectionMixin.IDLE
            self.connection_count = 0

        def acquire(self):
            return self._lock.acquire()

        def release(self):
            return self._lock.release()

        def wait(self, timeout=None):
            if timeout != None:
                self._cond.wait(timeout)
            else:
                self._cond.wait()

        def notify(self):
            self._cond.notify()

        def increment_connection_count(self):
            self.connection_count += 1
            self.state = ConnectionMixin.UP
            return self.connection_count

        def decrement_connection_count(self):
            self.connection_count -= 1
            if self.connection_count < 1:
                self.state = ConnectionMixin.DOWN

        def get_state(self):
            return self.state

        def set_state(self, state):
            self.state = state
Example #18
0
class _UserDictionary(PersistentDataObject):
    def __init__(self):
        PersistentDataObject.__init__(self,'mpx.lib.user._UserDictionary')
        self.users = {}
        self.__lock = Lock()
    def has_key(self,name):
        self.__lock.acquire()
        try:
            return self.users.has_key(name)
        finally:
            self.__lock.release()
    def __getitem__(self,name):
        self.__lock.acquire()
        try:
            return self.users[name]
        finally:
            self.__lock.release()
    def __setitem__(self,name,key):
        self.__lock.acquire()
        try:
            self.users[name] = key
            self.save()
        finally:
            self.__lock.release()
Example #19
0
class PeriodicExporter(Exporter, EventConsumerMixin):
    def __init__(self):
        Exporter.__init__(self)
        EventConsumerMixin.__init__(self, self.handle_connected, self.connection_event_error)
        self.running = 0
        self._scheduled = None
        self._lock = Lock()

    def handle_connected(self, event):
        self.msglog("%s Got connection event" % self.name)
        if event.__class__ == ConnectionEvent:
            self.msglog("Connection state is %s." % str(event.state))
            if event.state == ConnectionEvent.STATE.UP:
                self.msglog("Going to start export.")
                self.go()
        else:
            msg = (
                "Unknown event recieved by %s from %s." % (self.name, str(self.connection_node))
            ) + " Event: %s" % str(event)
            msglog.log("broadway", msglog.types.WARN, msg)

    def connection_event_error(self, exc, event):
        msg = (
            "Connection Event for "
            + str(self.connection_node)
            + " had the following Error\n"
            + "Event: "
            + str(event)
            + "Error: "
            + str(exc)
        )
        msglog.log("broadway", msglog.types.WARN, msg)

    def msglog(self, msg, force=0):
        if self.debug or force:
            msglog.log("broadway.mpx.service.data.periodic_exporter", msglog.types.DB, msg)

    def configure(self, config):
        map_to_attribute(self, "period", 900, config, map_to_seconds)
        if self.period == 0:
            raise EInvalidValue("period", self.period, "Export period cannot be 0")
        set_attribute(self, "debug", 0, config, as_boolean)
        set_attribute(self, "synchronize_on", "00:00", config)
        set_attribute(self, "timeout", 60, config, int)
        set_attribute(self, "connection_node", "/services/network", config)
        set_attribute(self, "connection_attempts", 3, config, int)
        set_attribute(self, "always_export", 0, config, as_boolean)
        set_attribute(self, "breakup_on_period", 0, config, as_boolean)
        Exporter.configure(self, config)
        self._time = _TimeStore(self)

    def configuration(self):
        config = Exporter.configuration(self)
        map_from_attribute(self, "period", config, map_from_seconds)
        get_attribute(self, "connection_node", config)
        get_attribute(self, "debug", config, str)
        get_attribute(self, "connection_attempts", config)
        get_attribute(self, "timeout", config)
        get_attribute(self, "always_export", config, str)
        get_attribute(self, "synchronize_on", config)
        get_attribute(self, "breakup_on_period", config, str)
        return config

    def start(self):
        Exporter.start(self)
        if not self.running:
            node = as_node(self.connection_node)
            if hasattr(node, "event_subscribe"):
                node.event_subscribe(self, ConnectionEvent)
            else:
                if self.debug:
                    msg = "Connection node: " + str(self.connection_node) + " is not an event producer."
                    msglog.log("broadway", msglog.types.INFO, msg)
            self.connection = node
            self.running = 1
            self._init_next_time()
            self._schedule()
        else:
            raise EAlreadyRunning

    def stop(self):
        self.running = 0
        if self._scheduled is not None:
            try:
                self._scheduled.cancel()
            except:
                pass
        Exporter.stop(self)

    def go(self, end_time, start_time=None):
        if self._lock.locked():
            msglog.log("broadway", msglog.types.WARN, "Last export still active, skipping current request.")
            return
        Exporter_ThreadPool.queue_noresult(self._complete, end_time, start_time)

    def scheduled_time(self):
        return self.next_time() - self.period

    def last_time(self):
        return self._time.get_last_time()

    def _schedule(self):
        next = self.next_time()
        self._scheduled = scheduler.at(next, self.go, (next,))

    def _complete(self, end_time, start_time=None):
        self._lock.acquire()
        try:
            try:
                self._export(end_time, start_time)
            except:
                msglog.exception()
        finally:
            self._lock.release()
        if self.running:
            self._schedule()

    ##
    #
    def _init_next_time(self):
        time_format = "%Y%m%d %H:%M:%S"
        sync_format = "%Y%m%d " + self.synchronize_on + ":00"
        current_time = int(time.time())
        f_sync = time.strftime(sync_format, self.time_function(current_time))
        f_now = time.strftime(time_format, self.time_function(current_time))
        sync = time.mktime(time.strptime(f_sync, time_format))
        now = time.mktime(time.strptime(f_now, time_format))
        if now > sync:
            # sync time in past, add one day to sync time.
            sync += map_to_seconds({"days": 1})
        gap = sync - now
        if self.period > gap:
            sync_time = current_time + gap
        else:
            sync_time = current_time + (gap % self.period)
        #
        #
        #
        self._next_time = sync_time
        return self._next_time

    def next_time(self):
        current_time = time.time()
        while self._next_time < current_time:
            self._next_time += self.period
        return self._next_time

    def data_since_export(self):
        start_time = self.last_time()
        end_time = self.next_time()
        return self.log.get_slice("timestamp", start_time, end_time)

    def formatted_data_since_export_as_string(self):
        length = 0
        stream = self.formatted_data_since_export()
        text = stream.read(1024)
        while len(text) > length:
            length = len(text)
            text += stream.read(1024)
        return text

    def formatted_data_since_export(self):
        return self.formatter.format(self.data_since_export())

    def export_data_since_export(self):
        return self.transporter.transport(self.formatted_data_since_export())

    def _export(self, end_time, start_time=None):
        attempts = 0
        connected = 0
        while attempts < self.connection_attempts:
            self.msglog("Acquiring connection %s." % str(self.connection_node))
            try:
                connected = self.connection.acquire()
            except:
                msglog.exception()
            if connected:
                self.msglog("Connection acquired")
                break
            attempts += 1
            self.msglog("Connection acquire failed %s times." % attempts)
        else:
            raise EConnectionError("Failed to connect %s times" % attempts)
        try:
            if start_time is None:
                start_time = self.last_time()
            if start_time == 0 and self.breakup_on_period:
                self.msglog("Start Time is 0 and set to Break on Transfer.")
                start_time = self.log.get_first_record()["timestamp"]
                self.msglog("Start Time set to timestamp of first row: %s" % time.ctime(start_time))
            retrieve = self.log.get_slice
            if self.log.name == "msglog":
                msglog.log("msglog.exporter", msglog.types.INFO, "repr(mpx.properties)\n%s\n" % (repr(properties)))
                retrieve = self.log.get_range
                end_time = time.time()
            end = end_time
            if self.breakup_on_period:
                self.msglog("Breaking on period")
                end = start_time + self.period
            self.msglog("Full export of slice from %s to %s" % (time.ctime(start_time), time.ctime(end_time)))
            while start_time != end_time:
                if end > end_time:
                    self.msglog("End greater than End Time.  Resetting to End Time")
                    end = end_time
                self.msglog("Going to export slice from %s to %s" % (time.ctime(start_time), time.ctime(end)))
                data = retrieve("timestamp", start_time, end)
                if (not data) and (not self.always_export):
                    raise ENoData("timestamp", start_time, end)
                self.msglog("Sending data to formatter.")
                try:
                    output = self.formatter.format(data)
                    if not output is None:
                        self.msglog("Sending formatted data to transporter.")
                        self.transporter.transport(output)
                    start_time = end
                except EBreakupTransfer, e:
                    entry = e.break_at
                    if entry["timestamp"] == end:
                        # prevents loop where transporter is just failing.
                        raise EIOError("EBreakupTransfer not progressing.")
                    end = entry["timestamp"]
                    msglog.log("broadway", msglog.types.WARN, "Breaking up data transfer at %s." % time.ctime(end))
                else:
                    self._time.set_last_time(start_time)
                    self.msglog("Data transported")
                    end = start_time + self.period
        finally:
            if hasattr(self.formatter, "cancel"):
                self.formatter.cancel()  # prevent mult copies of data at next successful transport
            if connected:
                self.msglog("Releasing connection.")
                self.connection.release()

    def nodebrowser_handler(self, nb, path, node, node_url):
        html = nb.get_default_view(node, node_url)
        html += "<h4>Commands</h4>\n"
        s = "%s?action=invoke&method=do_export" % self.name
        html += '<a href="%s">Force export via nodebrowser.</a>' % (s,)
        return html

    def do_export(self, end_time=None, start_time=None):
        if end_time is None:
            end_time = time.time()
        self.go(end_time, start_time)
        return "Export triggered."
Example #20
0
class DRASManager(RemoteWebServiceProxy):
    def __init__(self):
        super(DRASManager, self).__init__()
        self.__scheduled = None
        self.__observers = {}
        self.__lock = Lock()
        self.running = 0
        return

    def configure(self, cd):
        super(DRASManager, self).configure(cd)
        set_attribute(self, 'poll_freq', 60, cd, int)
        set_attribute(self, 'debug', 0, cd, int)
        return

    def configuration(self):
        cd = super(DRASManager, self).configuration()
        get_attribute(self, 'poll_freq', cd)
        get_attribute(self, 'debug', cd)
        return cd

    def start(self):
        super(DRASManager, self).start()
        if self.debug > 1:
            # dump soap messages to the console
            self._set_soap_debug_lvl(1)
        if not self.running:
            self.running = 1
            self._trigger_queue()
        return

    def stop(self):
        self.running = 0
        super(DRASManager, self).stop()
        return

    def _set_soap_debug_lvl(self, lvl):
        self._proxy.soapproxy.config.dumpSOAPIn = lvl
        self._proxy.soapproxy.config.dumpSOAPOut = lvl
        return

    def register(self, soap_func, obj):
        self.__lock.acquire()
        try:
            callback_list = self.__observers.get(soap_func)
            if callback_list is None:
                callback_list = []
                self.__observers[soap_func] = callback_list
            if not obj in callback_list:
                callback_list.append(obj)
        finally:
            self.__lock.release()
        return

    def unregister(self, soap_func, obj):
        self.__lock.acquire()
        try:
            callback_list = self.__observers.get(soap_func)
            if callback_list is None:
                return
            try:
                callback_list.remove(obj)
            except ValueError:
                # it's already gone
                pass
        finally:
            self.__lock.release()
        return

    def _do_poll(self):
        if self.debug:
            msglog.log('DRAS', INFO, 'Polling the demand response server')
        for soap_func, callback_list in self.__observers.items():
            for obj in callback_list:
                args = obj.get_args()
                try:
                    if args:
                        value = soap_func(*args)
                    else:
                        value = soap_func()
                except:
                    # SOAP errors live here
                    if self.debug:
                        msglog.log('DRAS', INFO,
                                   'Error polling the demand response server')
                        msglog.exception()
                    value = ETimeout()
                obj.update(value)
        self._schedule()
        return

    def _trigger_queue(self):
        NORMAL.queue_noresult(self._do_poll)
        return

    def _schedule(self):
        if self.running:
            self.__scheduled = scheduler.seconds_from_now_do(
                self.poll_freq, self._trigger_queue)
        return
Example #21
0
class ActiveProxyAbstractClass(EventConsumerMixin):
    def __init__(self):
        self._link = None #get 'actual' node
        self.link = None
        self._proxy_get = None #set to actual's preferred get method
        self._proxy_set = None
        self._proxy_start_exception = None
        self._proxy_sid = None
        self.proxy_direction = GET_ONLY #direction subscription "pushes" the data
        self._proxy_active_source = None
        self._proxy_active_destination = None
        self._proxy_active_lock = Lock()
        self._proxy_active_thread_lock = Lock()
        self._proxy_active_event = None
        self._proxy_trigger_counter = 0
        self._retry_win_high = 30
        EventConsumerMixin.__init__(self, self.change_of_value)
        self.debug = debug
    def configure(self, cd):
        set_attribute(self, 'link', None, cd, str)
        set_attribute(self, 'error_response', '%ERROR%', cd, str)  #keywords: %ERROR% %NONE% or desired value
        set_attribute(self, 'proxy_direction', self.proxy_direction, cd, int)
    def configuration(self, cd=None):
        if cd is None:
            cd = {}
        get_attribute(self, 'link', cd, str)
        get_attribute(self, 'error_response', cd, str)
        get_attribute(self, 'proxy_direction', cd, str)
        return cd
    ##
    def start(self):
        self._proxy_start_exception = None
        if self.is_proxy():
            self._proxy_start_active_mode()

    def stop(self):
        if self._proxy_sid is not None:
            SM.destroy(self._proxy_sid)
            self._proxy_sid = None
        return
    
    ##
    # start up subscription service if we are in active mode
    # keep trying until we are successful
    # todo: thread safe?
    def _proxy_start_active_mode(self):
        if self.link:
            try:
                if self._proxy_sid is None: #have not started subscription service yet
                    if self.proxy_direction == GET_ONLY:
                        self._proxy_active_source = self._proxy_linked_node()
                        if self._proxy_active_source is None:
                            raise ENotStarted()
                        self._proxy_active_destination = self
                    else: #SET_ONLY
                        self._proxy_active_source = self
                        self._proxy_active_destination = self._proxy_linked_node()
                        if self._proxy_active_destination is None:
                            raise ENotStarted()
                    self._proxy_active_queue = Queue()
                    self._proxy_sid = SM.create_delivered(self, {1:self._proxy_active_source})
                    if self.debug: print 'Active proxy %s started successfully' % (self.name)
            except:
                #it didn't work.  Set up a schedule to try again in x seconds.
                if self._retry_win_high < 90:
                    self._retry_win_high += 1
                retry_in = randint(int(self._retry_win_high * .66), self._retry_win_high)
                scheduler.seconds_from_now_do(retry_in, self._proxy_start_active_mode)
                #raise  #took this out since it mostly just served to force the scheduler thread to restart
                if self.debug: msglog.exception()

    def change_of_value(self, event):
        #print 'proxy change of value event'
        self._last_event = event
        #if not isinstance(event, ChangeOfValueEvent):
            #return
        # Insert event into queue, and (automatically) notify _event_handler_thread:
        trigger = 0
        self._proxy_active_lock.acquire()
        try:
            self._proxy_active_event = event # save only latest event, throw away older values
            if self._proxy_trigger_counter < 3: #no point in trigger too many times for noisy inputs
                self._proxy_trigger_counter += 1
                trigger = 1
        finally:
            self._proxy_active_lock.release()  #could this line go below the next to reduce triggers to thread?
        if trigger:
            #print 'proxy trigger'
            self._proxy_trigger_queue()

    def _proxy_trigger_queue(self):
        #print 'proxy triggerED'
        # run the set function on a thread pool thread:
        NORMAL.queue_noresult(self.proxy_active_set, self)
        return
    def proxy_active_set(self, dummy):
        #print 'active proxy event'
        self._proxy_active_thread_lock.acquire() #only one set at a time is active
        try:
            try:
                event = None
                self._proxy_active_lock.acquire()
                try:
                    event = self._proxy_active_event
                    self._proxy_active_event = None
                    if self._proxy_trigger_counter:
                        self._proxy_trigger_counter -= 1
                finally:
                    self._proxy_active_lock.release() #allow any new covs while we do the set
                if event: #pending event
                    if self.debug:
                        print str(event)
                    value = event.results()[1]['value']
                    if isinstance(value, Exception):
                        raise value
                    try: #to set() value on destination node
                        self._proxy_active_destination.set(value) #don't know how long this will take
                        self._proxy_set_exception(None)
                    #failure in attempt to set data, maybe node is not ready yet, try again later    
                    except (ETimeout, EConnectionError, ENotStarted, ENoSuchNode):
                        #put the event back in the active event if no new one has come in while we were trying to set()
                        self._proxy_active_lock.acquire()
                        try:
                            if self._proxy_active_event is None:
                                self._proxy_active_event = event #put it back in for next attempt unless a new one came in
                        finally:
                            self._proxy_active_lock.release() #allow any new covs while we do the set
                        scheduler.seconds_from_now_do(60, self._proxy_trigger_queue)  #try again in one minute
                        raise #re-raise the set() exception
                    except:
                        raise
                    if self.debug: print 'proxy_active_set call set returned'
            except Exception, e:
                try:
                    self._proxy_set_exception(e)
                    # we have squashed the exception
                    # we want to log exceptions that are potential bugs
                    # but we don't want to fill the msglog with ETimeouts
                    if not isinstance(e, ETimeout):
                        msglog.exception()
                except:
                    # if there is a bug in the set_exception method we want
                    # to see this otherwise it makes debugging difficult
                    msglog.exception()
        finally:
            self._proxy_active_thread_lock.release()
        if self.debug: print 'proxy_active_set done'
        pass
    
    def is_proxy(self):
        if self.link:
            return 1
        return 0

    def linked_node_has_set(self):
        return hasattr(self._proxy_linked_node(), 'set')
    
    ##
    # Lazily discovers reference to linked node
    #
    def _proxy_linked_node(self):
        if self._link is None:
            self._link = as_node(self.link)
        return self._link
    ##
    # If the subclass supports the 'set_exception' interface, keep it updated
    #
    def _proxy_set_exception(self, e):
        if hasattr(self, 'set_exception'):
            self.set_exception(e)
Example #22
0
class Kwh2Kw(CompositeNode):
    def __init__(self):
        self._history = None
        self._history_lock = Lock()
        self._sid = None
        self._nid = 1
        self._poll_failure = False
        self._scheduled = None
        self.running = False
        super(Kwh2Kw, self).__init__()
        return
        
    def configure(self, cd):
        super(Kwh2Kw, self).configure(cd)
        set_attribute(self, 'link', REQUIRED, cd)
        # sample_period and window used to set the number of
        # samples that constitutes the size of the moving avg.
        set_attribute(self, 'sample_period', 10.0, cd, float)
        set_attribute(self, 'window', 120, cd, int)
        self._window_size = self.window / self.sample_period
        if self.running:
            # reconfigure things
            self.stop()
            self.start()
        return
               
    def configuration(self):
        cd = super(Kwh2Kw, self).configuration()
        get_attribute(self, 'link', cd)
        get_attribute(self, 'sample_period', cd)
        get_attribute(self, 'window', cd)
        return cd
        
    def start(self):
        super(Kwh2Kw, self).start()
        self.running = True
        self._history = KwList(self._window_size)
        self._sid = SM.create_polled({self._nid:self.link})
        # retrieve an initial value to start things off 
        value = ts = None
        result = SM.poll_all(self._sid)
        if result is None:
            # still waiting
            try:
                value = as_node(self.link).get()
                ts = time.time()
            except:
                pass
        else:
            try:
                value = result[self._nid]['value']
                ts = result[self._nid]['timestamp']
            except:
                pass
            if isinstance(value, MpxException):
                value = None
        if value and ts:
            self._history.add(value, ts)
        self._scheduled = scheduler.seconds_from_now_do(self.sample_period, self.run_update)
        return
        
    def stop(self):
        self.running = False
        self._history = None
        try:
            SM.destroy(self._sid)
        except:
            pass
        self._sid = None
        self._history_lock.acquire()
        try:
            self._history = None
            s = self._scheduled
            self._scheduled = None
            if s is not None:
                try:
                    s.cancel()
                except:
                    pass
        finally:
            self._history_lock.release()
        return
        
    ##
    # update() can be relatively slow, run it on a threadpool
    def run_update(self):
        NORMAL.queue_noresult(self.update)
        return
        
    def update(self):
        try:
            value = ts = None
            result = SM.poll_all(self._sid)
            if result is not None:
                value = result[self._nid]['value']
                ts = result[self._nid]['timestamp']
            self._history_lock.acquire()
            try:
                if value is None or isinstance(value, MpxException):
                    # there were problems collecting during this period, 
                    # our calculation should not proceed
                    self._history.clear()
                    if not self._poll_failure:
                        # log the failure, but don't spam the msglog
                        self._poll_failure = True
                        msglog.log('Kwh2Kw', msglog.types.WARN, 
                                   'Failed to retrieve data from %s' % self.link)
                else:
                    self._poll_failure = False
                    self._history.add(value, ts)
            finally:
                self._history_lock.release()
        except:
            msglog.exception()
        self._scheduled = scheduler.seconds_from_now_do(self.sample_period, self.run_update)
        return
            
    def get(self, skipCache=0):
        return self._history.moving_average()
Example #23
0
class _User(PersistentDataObject):
    USERS = _UserDictionary()
    def __init__(self,name,new=0,
                 password_file=PASSWD_FILE,group_file=GROUP_FILE,
                 shadow_file = SHADOW_FILE):
        self.__lock = Lock()
        self.__password_file = password_file
        self.__shadow_file = shadow_file
        self.__group_file = group_file
        self.__loaded = 0
        self.__file_modified = 0
        self.load(name)
        self.meta = {}
        self.USERS.load()
        if not self.USERS.has_key(self.name()):
            msglog.log('broadway',msglog.types.INFO,
                       ('No profile for user %s found, creating'
                        ' new profile' % name))
            self.USERS[self.name()] = str(UUID())
        PersistentDataObject.__init__(self,self.USERS[self.name()])
        PersistentDataObject.load(self)
    def loaded(self):
        self.__lock.acquire()
        try:
            return self.__loaded
        finally:
            self.__lock.release()
    def load(self,name):
        self.__lock.acquire()
        try:
            passwd_db = PasswdFile(self.__password_file)
            passwd_db.load()
            if name in passwd_db:
                self.__user = passwd_db[name]
            else:
                self.__user = None
                raise EInvalidValue('name',name,'No such user.')
            self.__file_modified = passwd_db.last_modified()

            # loading /etc/shadow database
            shadow_db = ShadowFile(self.__shadow_file)
            shadow_db.load()
            if name in shadow_db:
                self.__shadow = shadow_db[name]
            else:
                self.__shadow = None
                raise EInvalidValue('name', name, 'No such user in shadow file.')
            self.__shadow_file_modified = shadow_db.last_modified()

            self.__loaded = 1
        finally:
            self.__lock.release()
    def reload(self):
        self.load(self.name())
    def save(self):
        self.__lock.acquire()
        try:
            passwd_db = PasswdFile(self.__password_file)
            passwd_db.load()
            passwd_db[self.name()] = self.password_entry()
            passwd_db.save()

            # save /etc/shadow content
            shadow_db = ShadowFile(self.__shadow_file)
            shadow_db.load()
            shadow_db[self.name()] = self.shadow_entry()
            shadow_db.save()
        finally:
            self.__lock.release()
        self.load(self.name())
    def name(self):
        return self.__user.user()
    def password(self):
        raise ENotImplemented(self.password)
    def set_password(self,password, validate=True):
        self.__lock.acquire()
        try:
            shadow_db = ShadowFile(self.__shadow_file)
            shadow_db.load()
            shadowentry = shadow_db[self.name()]
            shadowentry.passwd(password, validate)
            shadow_db[self.name()] = shadowentry
            shadow_db.save()
        finally:
            self.__lock.release()
        self.load(self.name())
    def crypt(self):
        return self.__shadow.crypt()
    def set_crypt(self,crypt):
        self.__shadow.crypt(crypt)
    def uid(self):
        return self.__user.uid()
    def set_uid(self,uid):
        self.__user.uid(uid)
    def gid(self):
        return self.__user.gid()
    def set_gid(self,gid):
        self.__user.gid(gid)
    def group(self):
        return self.__user.groups()[0]
    def groups(self):
        group_db = GroupFile(self.__group_file)
        group_db.load()
        return self.__user.groups(group_db)
    def group_ids(self):
        ids = []
        for group in self.groups():
            ids.append(group.gid())
        return ids
    def gecos(self):
        return self.__user.gecos()
    def set_gecos(self,gecos):
        self.__user.gecos(gecos)
    def directory(self):
        return self.__user.directory()
    def set_directory(self,directory):
        self.__user.directory(directory)
    def shell(self):
        return self.__user.shell()
    def set_shell(self,shell):
        self.__user.shell(shell)
    def is_dirty(self):
        if not self.__loaded:
            return 1
        self.__lock.acquire()
        try:
            passwd_db = PasswdFile(self.__password_file)
            if not passwd_db.exists():
                return 1
            if passwd_db.last_modified() > self.__file_modified:
                return 1
            shadow_db = ShadowFile(self.__shadow_file)
            if not shadow_db.exists():
                return 1
            return not not (shadow_db.last_modified() >
                            self.__shadow_file_modified)
        finally:
            self.__lock.release()
    def set_meta_value(self,name,value):
        self.meta[name] = value
        PersistentDataObject.save(self)
    def get_meta_value(self,name,default=None):
        if self.meta.has_key(name):
            return self.meta[name]
        return default
    def get_meta(self):
        return self.meta.copy()
    def __getitem__(self,name):
        return self.get_meta_value(name)
    def __setitem__(self,name,value):
        return self.set_meta_value(name,value)
    def has_key(self,k):
        return self.meta.has_key(k)
    def items(self):
        return self.meta.items()
    def values(self):
        return self.meta.values()
    def keys(self):
        return self.meta.keys()
    def password_entry(self):
        return self.__user
    def shadow_entry(self):
        return self.__shadow
    def user_type(self):
        return self.__user.user_type()
    def password_expired(self):
        if self.crypt()[:3] == '$1$':
            return False
        raise EPasswordExpired(self.name())
    def is_admin(self):
        return self.user_type() == "mpxadmin"
Example #24
0
 class __impl(ImmortalThread):
     tm_counter = Counter(0)
     def __init__(self, timeout=2.0):
         self.timeout = timeout
         self.stations = {}
         self._monitor = monitor.ChannelMonitor(self.timeout)
         self.tm_number = self.tm_counter.increment()
         self._response_tp = ThreadPool(1, 'Jace Response Pool')
         self._pending_responses = Queue()
         self._callbacks = {}
         self._running = False
         self._sync_get_lock = Lock()
         self._last_sync_get = uptime.secs()
         self._cv = Condition()
         ImmortalThread.__init__(self, None, None, 'Jace Transaction Manager')
         return
     def start(self):
         if not self._monitor.is_running():
             self._monitor.start_monitor()
         self._running = True
         self._synchronous_transaction = Transaction(self, None, self._bump_cv)
         self._synchronous_transaction.set_timeout(self.timeout)
         ImmortalThread.start(self)
         return
     def stop(self):
         msglog.log('Jace', INFO, 'Stop Jace Prism Transaction Manager')
         if self._monitor.is_running():
             self._monitor.stop_monitor()
         self._running = False
         return
     def run(self):
         msglog.log('Jace', INFO, 'Starting Jace Prism Transaction Manager.')
         while self._running:
             try:
                 self.send_loop()
                 self.response_loop()
             except:
                 msglog.log('Jace', WARN, 'Jace Transaction Manager - error sending next.')
                 msglog.exception()
         return
     def transaction_completion_handler(self, transaction):
         self.tm_number = self.tm_counter.increment()
         try:
             tid = transaction.tid
             s_id, callback = self._callbacks.get(tid)
             if callback:
                 del self._callbacks[tid]
                 self._pending_responses.put((callback, transaction.get_response()))
         except:
             msglog.exception()
         # recycle the transaction for reuse within the queue
         self.stations.get(s_id).put_transaction(transaction)
         return
     def add_station(self, station):
         s_id = station.get_id()
         self.stations[s_id] = station
         return
     def get_synchronous(self, station, rqst):
         self._sync_get_lock.acquire()
         try:
             t = self._synchronous_transaction
             hdr = self._get_auth_header(station)
             hdr['Connection'] = 'close'
             t.build_request(rqst.url, None, hdr)
             self._cv.acquire()
             try:
                 response = ETimeout()
                 try:
                     t.send_request()
                     self._cv.wait(self.timeout)
                     self._last_sync_get = uptime.secs()
                     if t.is_expired():
                         t.cancel()
                     else:
                         response = t.get_response()
                 except:
                     t.cancel()
             finally:
                 self._cv.release()
             return response
         finally:
             self._sync_get_lock.release()
         return
     def _bump_cv(self, transaction):
         # transaction isn't used
         self._cv.acquire()
         self._cv.notify()
         self._cv.release()
         return
     def send_loop(self):
         for s_id, station in self.stations.items():
             for i in range(station.transaction_limit):
                 try:
                     t, rqst = station.get_next()
                 except Empty:
                     break
                 hdr = self._get_auth_header(station)
                 hdr['Connection'] = 'close'
                 t.build_request(rqst.url, None, hdr)
                 self._callbacks[t.tid] = (s_id, rqst.callback)
                 t.send_request()
         return
     def response_loop(self):
         while 1:
             try:
                 callback, rsp = self._pending_responses.get(False)
                 callback(rsp)
             except Empty:
                 return
             except:
                 msglog.log('Jace', WARN, 'Unexpected error in response_loop')
                 msglog.exception()
         return
     def _get_auth_header(self, station):
         return {"Authorization":
                 "Basic %s" % station.base64string}
Example #25
0
class DeviceProperty(CompositeNode, EventProducerMixin, UpdateMixin):
    _release_cmd_base = ''
    _ovrd_cmd_base = ''
    def __init__(self):
        # overridden by subclasses
        self._pv_index = None
        self._last_rcvd = 0.0
        self._last_rcvd_dlta = 0.0
        self._cached_value = None
        self._cached_result = None
        self._prop_values = None
        self._subscription_lock = Lock()
        CompositeNode.__init__(self)
        EventProducerMixin.__init__(self)
        return
    def configure(self, cd):
        super(DeviceProperty, self).configure(cd)
        set_attribute(self, 'swid', REQUIRED, cd)
        set_attribute(self, 'prop_type', REQUIRED, cd)
        set_attribute(self, 'prop_name', self.name, cd)
        set_attribute(self, 'node_root', 'nodeDump', cd)
        set_attribute(self, 'value_key', 'presentValue', cd)
        set_attribute(self, 'bundle', 1, cd, int)
        set_attribute(self, 'ttl', 60, cd, int)
        set_attribute(self, 'read_only', 1, cd, int)
        return
    def configuration(self):
        cd = super(DeviceProperty, self).configuration()
        get_attribute(self, 'swid', cd)
        get_attribute(self, 'prop_type', cd)
        get_attribute(self, 'prop_name', cd)
        get_attribute(self, 'node_root', cd)
        get_attribute(self, 'value_key', cd)
        get_attribute(self, 'bundle', cd)
        get_attribute(self, 'ttl', cd)
        get_attribute(self, 'read_only', cd)
        return cd
    def start(self):
        for prop_name in PROPERTIES.get(self.prop_type, ()):
            if prop_name == self.prop_name:
                # this property will be returned via a get()
                # to this node
                continue
            p = Prop()
            cd = {'name':prop_name,
                  'parent':self}
            p.configure(cd)
            p.start()
        if self.node_root == 'nodeDump' and self.value_key == 'presentValue':
            setattr(self, 'decode', self._decode_fast)
        else:
            setattr(self, 'decode', self._decode_slow)
        self.url = BASE_URL % (self.station.host, self.swid)
        self._rqst = JaceRequest(self.url, ttl=self.ttl)
        self._configure_set()
        super(DeviceProperty, self).start()
        return
    def _configure_set(self):
        if self.read_only:
            return
        self._ovrd_cmd = self._ovrd_cmd_base % (self.station.host, self.swid)
        self._release_cmd = self._release_cmd_base % (self.station.host, self.swid)
        setattr(self, 'set', self._set)
        return
    def get_property_value(self, prop_name):
        if self._prop_values is None:
            self._load_property_value()
        if self._prop_values is None:
            # the station request failed; treat the property as unknown
            return ''
        return self._prop_values.get(prop_name, '')
    def _load_property_value(self):
        rsp = self.station.add_request(JaceRequest(self.url))
        if isinstance(rsp, Exception):
            return
        self._prop_values = {}
        if not rsp.is_complete():
            rsp.await_completion()
        data = rsp.read()
        if data.startswith('<!--'):
            data = data[(data[1:].find('<')+1):]
        data_o = self._decode_slow(data)
        for prop in self.children_nodes():
            if not isinstance(prop, Prop):
                continue
            try:
                value = fix_up_value(getattr(data_o, prop.name).get_tag_value())
            except:
                value = ''
            self._prop_values[prop.name] = value
        return
    def _decode_fast(self, data):
        value = None
        # split into lines for scanning, but keep the original string for
        # the _decode_slow() fallback
        lines = data.split('\n')
        if self._pv_index is None:
            cnt = 0
            for l in lines:
                if l.count('presentValue'):
                    self._pv_index = cnt
                    break
                cnt += 1
        else:
            try:
                l = lines[self._pv_index]
            except:
                value = self._decode_slow(data)
                if value:
                    value = fix_up_value(value)
                return value
        if l.count('presentValue') == 0:
            value = self._decode_slow(data)
        else:
            try:
                value = l.split('>')[1].split('<')[0]
            except:
                value = self._decode_slow(data)
        if value:
            value = fix_up_value(value)
        return value
    def _decode_slow(self, data):
        try:
            data_o = xml2code(data)
        except:
            data_o = None
        return data_o
    def event_subscribe(self, *args):
        self._subscription_lock.acquire()
        try:
            already_subscribed = self.event_has_subscribers()
            EventProducerMixin.event_subscribe(self, *args)
            if self.parent.can_bundle() and self.bundle:
                self.parent.subscribe(self.prop_name, self.update_cache)
            elif not already_subscribed:
                self.update_continuous(None)
                if self._cached_result and \
                    (uptime.secs() - self._cached_result.timestamp) < self.ttl:
                    self._trigger_cov(
                        self._cached_result.value, self._cached_result.value, time.time()
                    )
        finally:
            self._subscription_lock.release()
        return
    def event_unsubscribe(self, *args):
        self._subscription_lock.acquire()
        try:
            EventProducerMixin.event_unsubscribe(self, *args)
            if self.parent.can_bundle() and self.bundle:
                self.parent.unsubscribe(self.prop_name)
        finally:
            self._subscription_lock.release()
        return
    def _trigger_cov(self, old_value, new_value, t):
        cov = ChangeOfValueEvent(self, old_value, new_value, t)
        self.event_generate(cov)
        return
    def get(self, skipCache=0):
        v = self.get_result(skipCache).value
        if isinstance(v, Exception):
            raise ETimeout
        return v
    def get_result(self, skipCache=0):
        dt = uptime.secs() - self._last_rcvd
        if dt > self.ttl or self._cached_value is None:
            # data is stale
            self.update() # blocks
        return self._cached_result
    def update_cache(self, value):
        now = uptime.secs()
        if isinstance(value, ValueObj):
            value = value.get(self.prop_name)
        if value is None or isinstance(value, Exception):
            value = ETimeout()
        if value != self._cached_value:
            if self.event_has_subscribers():
                self._trigger_cov(self._cached_value, value, time.time())
            self._cached_value = value
        if self._cached_result is None:
            changes = 0
        else:
            changes = self._cached_result.changes + 1
        self._cached_result = Result(
            self._cached_value, self._last_rcvd, changes
            )
        self._last_rcvd = now
        return
    def has_cov(self):
        return 1
    def _set(self, value):
        if value in (None,'None'):
            url = self._release_cmd
        else:
            url = self._ovrd_cmd + str(value)
        self.station.add_request(JaceRequest(url))
        return
    def _get_station(self):
        return self.parent.parent
    station = property(_get_station)
Example #26
0
class Schedule(CompositeNode, EventProducerMixin):
    def __init__(self):
        CompositeNode.__init__(self)
        EventProducerMixin.__init__(self)
        self._schedule_lock = Lock()
        self._schedule_condition = Condition()
        self._value_lock = Lock()
        self._value_condition = Condition()
        self.__schedule = None
        self.__value = None

    def configure(self, config):
        CompositeNode.configure(self, config)

    def configuration(self):
        config = CompositeNode.configuration(self)
        return config

    def start(self):
        CompositeNode.start(self)

    def stop(self):
        CompositeNode.stop(self)

    def set_schedule(self, client, schedule):
        self._schedule_lock.acquire()
        self._schedule_condition.acquire()
        try:
            self.__schedule = schedule
            self._schedule_condition.notifyAll()
        finally:
            self._schedule_lock.release()
            self._schedule_condition.release()
        self.event_generate(ScheduleChangedEvent(client, schedule))

    def get_schedule(self):
        self._schedule_lock.acquire()
        try:
            schedule = self.__schedule
        finally:
            self._schedule_lock.release()
        if isinstance(schedule, Exception):
            raise schedule
        return schedule

    ##
    # @param schedule Schedule client believes to be current.
    def get_next_schedule(self, schedule, timeout=None):
        self._schedule_lock.acquire()
        try:
            if schedule is not self.__schedule:
                return self.__schedule
            self._schedule_condition.acquire()
        finally:
            self._schedule_lock.release()
        try:
            self._schedule_condition.wait(timeout)
            schedule = self.__schedule
        finally:
            self._schedule_condition.release()
        if isinstance(schedule, Exception):
            raise schedule
        return schedule

    def is_schedule_current(self, schedule):
        self._schedule_lock.acquire()
        try:
            current = schedule is self.__schedule
        finally:
            self._schedule_lock.release()
        return current

    def _set(self, value):
        self._value_lock.acquire()
        self._value_condition.acquire()
        try:
            old = self.__value
            self.__value = value
            self._value_condition.notifyAll()
        finally:
            self._value_lock.release()
            self._value_condition.release()
        if old != value:
            self.event_generate(ChangeOfValueEvent(self, old, value))

    def get(self, skipCache=0):
        self._value_lock.acquire()
        try:
            value = self.__value
        finally:
            self._value_lock.release()
        return value

    def get_next_value(self, value, timeout=None):
        self._value_lock.acquire()
        try:
            if value != self.__value:
                return self.__value
            self._value_condition.acquire()
        finally:
            self._value_lock.release()
        try:
            self._value_condition.wait(timeout)
            value = self.__value
        finally:
            self._value_condition.release()
        return value

    def is_value_current(self, value):
        self._value_lock.acquire()
        try:
            current = value == self.__value
        finally:
            self._value_lock.release()
        return current
Example #27
0
class TestCase(DefaultTestFixture):
    def setUp(self):
        DefaultTestFixture.setUp(self)
        self.lock = Lock()
        self.pool = ThreadPool(3)
        self.queue = Queue()
        self.simple_action_counter = 0
        return

    def tearDown(self):
        self.pool._unload()
        DefaultTestFixture.tearDown(self)
        return

    def simple_action(self, object):
        # @note It appears that even the '+= 1' operation is not
        #       guaranteed to be atomic.
        self.lock.acquire()
        self.simple_action_counter += 1
        self.lock.release()
        return 'simple_action_result'

    def slow_action(self, object):
        time.sleep(1.0)
        return 'slow_action_result'

    def simple_queue_action(self, object):
        self.queue.put(object)
        return

    def test_simple_queue(self):
        self.pool.queue(self.simple_queue_action, self)
        result = self.queue.get(1.0)
        if result is not self:
            raise "Queue returned %r instead of self, %r." % (result, self)
        return

    def test_result(self):
        t1 = time.time()
        pending_result = self.pool.queue(self.simple_action, self)
        result = pending_result.result(10.0)
        t2 = time.time()
        if result != 'simple_action_result':
            raise ("pending_result.result() returned the wrong value (%s)." %
                   result)
        if (t2 - t1) >= 10.0:
            raise "pending_result.result() blocked for no reason."
        return

    def test_pending_result(self):
        t1 = time.time()
        pending_result = PendingResult(None, None, self.simple_action, self)
        pending_result_two = self.pool.queue_pending_result(pending_result)
        if pending_result_two is not pending_result:
            raise "pending_result_two is NOT pending_result"
        result = pending_result.result(10.0)
        t2 = time.time()
        if result != 'simple_action_result':
            raise ("pending_result.result() returned the wrong value (%s)." %
                   result)
        if (t2 - t1) >= 10.0:
            raise "pending_result.result() blocked for no reason."
        return

    def test_pending_action(self):
        pending_action = PendingAction(self.simple_queue_action, self)
        self.pool.queue_pending_action(pending_action)
        result = self.queue.get(1.0)
        if result is not self:
            raise "Queue returned %r instead of self, %r." % (result, self)
        return
        return

    def test_result_timeout(self):
        t1 = time.time()
        pending_result = self.pool.queue(self.slow_action, self)
        result = pending_result.result(0.25)
        t2 = time.time()
        if (t2 - t1) >= 1.0:
            raise Exception("Blocked for 1 second despite a 1/4 second timeout.")
        if result != NORESULT:
            raise Exception("Got a result (%s) when none was expected." % result)
        return

    def test_1000_actions(self):
        for i in xrange(0, 1000):
            self.pool.queue(self.simple_action, self)
        time.sleep(0.1)
        t1 = time.time()
        while self.simple_action_counter < 1000:
            tn = time.time()
            if (tn - t1) > 3.0:
                raise Exception(
                    "Taking ridiculously long to process 1000 queued actions.")
            time.sleep(0.1)
        return

    def test_HIGH_pool_1(self):
        t1 = time.time()
        pending_result = HIGH.queue(self.simple_action, self)
        result = pending_result.result(10.0)
        t2 = time.time()
        if result != 'simple_action_result':
            raise ("pending_result.result() returned the wrong value (%s)." %
                   result)
        if (t2 - t1) >= 10.0:
            raise "pending_result.result() blocked for no reason."
        return

    def test_HIGH_pool_2(self):
        self.test_HIGH_pool_1()
        return

    def test_HIGH_pool_resize_1(self):
        HIGH.resize(1)
        if HIGH.size() != 1:
            raise "Resize to 1 thread failed."
        for i in xrange(0, 100):
            HIGH.queue(self.simple_action, self)
        t1 = time.time()
        while self.simple_action_counter < 100:
            tn = time.time()
            if (tn - t1) > 3.0:
                raise Exception(
                    "Taking ridiculously long to process 100 queued actions.")
            time.sleep(0.1)
        return

    def test_HIGH_pool_resize_20(self):
        HIGH.resize(20)
        if HIGH.size() != 20:
            raise "Resize to 20 threads failed."
        for i in xrange(0, 100):
            HIGH.queue(self.simple_action, self)
        t1 = time.time()
        while self.simple_action_counter < 100:
            tn = time.time()
            if (tn - t1) > 3.0:
                raise Exception(
                    "Taking ridiculously long to process 100 queued actions.")
            time.sleep(0.1)
        return
Example #28
0
class _Trigger(CompositeNode):
    implements(ITrigger)
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        self.targetmap = {}
        self.targets = set()
        self.unresolvable = set()
        self.synclock = Lock()
        super(_Trigger, self).__init__(*args)

    security.protect('get_targets', 'View')

    def get_targets(self, unresolved=False):
        #get targets
        targets = []
        for targeturl in self.targets:
            target = self.targetmap.get(targeturl)
            if target and not target.parent:
                message = "Trigger %s resetting pruned target: %r."
                msglog.warn(message % (self.name, targeturl))
                self.targetmap.pop(targeturl)
                target = None
            if not target:
                try:
                    target = self.nodespace.as_node(targeturl)
                except KeyError:
                    if targeturl not in self.unresolvable:
                        message = "Trigger %s Unable to resolve target: %r."
                        msglog.warn(message % (self.name, targeturl))
                    self.unresolvable.add(targeturl)
                else:
                    self.targetmap[targeturl] = target
                    self.unresolvable.discard(targeturl)
            if target:
                targets.append(target)
            elif unresolved:
                targets.append(targeturl)
        return targets

    security.protect('get_target_names', 'View')

    def get_target_names(self):
        return [target.name for target in self.get_targets()]

    security.protect('add_target', 'Configure')

    def add_target(self, target):
        if not isinstance(target, str):
            targeturl = as_node_url(target)
        else:
            targeturl = target
            try:
                target = self.nodespace.as_node(targeturl)
            except KeyError:
                target = None
        if targeturl == "/":
            raise ValueError("Invalid trigger target: %r" % target)
        self.synclock.acquire()
        try:
            if targeturl not in self.targets:
                self.targets.add(targeturl)
                if target:
                    self.targetmap[targeturl] = target
                else:
                    message = "Trigger %r added unresolvable target: %r"
                    msglog.warn(message % (self.name, targeturl))
                added = True
            else:
                added = False
                message = "Trigger %r not adding target %r: already exists."
                msglog.warn(message % (self.name, targeturl))
        finally:
            self.synclock.release()
        return added

    security.protect('remove_target', 'Configure')

    def remove_target(self, target):
        if not isinstance(target, str):
            targeturl = as_node_url(target)
        else:
            targeturl = target
        self.synclock.acquire()
        try:
            self.targets.remove(targeturl)
        except KeyError:
            removed = False
            message = "Target %s not removed from %s: does not exist."
            msglog.warn(message % (target, self))
        else:
            try:
                self.targetmap.pop(targeturl)
            except KeyError:
                pass
            removed = True
            msglog.inform("Target %s removed from %s." % (target, self))
        finally:
            self.synclock.release()
        return removed
Example #29
0
class OmniProto(CompositeNode):
    """ This class is modelled to hold the omnimeter protocol . 
    The class gives protocol level service for enabling and disabling protocol
     and enabling and disabling the debug prints . """
    __node_id = 'fcab0563-fabf-452a-b527-f592d04dbe8e'
    
    def __init__(self):
        super(OmniProto, self).__init__()
        self.lock = Lock()
        self.lh = None
        self.conn = None

    def configure(self, config):
#        #polling period was used in case of thread
#        set_attribute(self, 'polling_period', 120, config, int)
        set_attribute(self, 'cache_life', 10, config, int)
        set_attribute(self, 'retry_count', 3, config, int)
        set_attribute(self, 'reply_timeout', 1, config, float)
        super(OmniProto, self).configure(config)  
        if self.debug:
            msglog.log('omnimeter', msglog.types.INFO, 
                       "OMNIMETER in configure")
            msglog.log('omnimeter', msglog.types.INFO, "Enabled:%d" 
                       % (self.enabled))
            msglog.log('omnimeter', msglog.types.INFO, "Debug:%d" 
                       % (self.debug))

    def configuration(self):
        config = super(OmniProto, self).configuration()
        get_attribute(self, 'cache_life', config)
        get_attribute(self, 'retry_count', config)
        get_attribute(self, 'reply_timeout', config)
#        #polling period was used in case of thread        
#        get_attribute(self, 'polling_period', config)
        return config

    def start(self):
        if self.is_running():
            raise EAlreadyRunning()
        self.conn = gdconn.FrameworkSerialPortWrapper(self.parent)
        self.lh = gdlh.SimpleLineHandler(self.conn)
        try:
            self.lh.connect()
        except EAlreadyOpen:
            msglog.log('omnimeter', msglog.types.ERR, 
                       'COM Port already in use')
            raise
            
        #stx seems to be the core of the omnimeter protocol.
        #though stx shouldn't be here, it ensures we receive data properly
        #in spite of a variable number of FEs
        self.stx_obj = om.start_byte()
        # starting the polling thread. 
        # Polling thread not being used currently
#        self.start_thread()
        msglog.log('omnimeter', msglog.types.INFO, 
                       "OMNIMETER Protocol started")
        super(OmniProto, self).start()

    def stop(self):
        if not self.is_running():
            raise ENotRunning()
        self.lh.disconnect()
        #have to see some more here ??
        super(OmniProto, self).stop()
        msglog.log('omnimeter', msglog.types.INFO, 
                   "OMNIMETER Protocol stopping")

    def send_request(self, request_obj,
                     response_obj, wait_time=None, numretries=None):
        """API to devices to send a request object and wait for response

        devices may provide wait_time, retry_count else may use defaults
        provided by protocol
        """
        if not self.is_running():
            raise ENotRunning()
        if wait_time is None:
            wait_time = self.reply_timeout
        if numretries is None:
            numretries = self.retry_count
        #have to lock here to ensure, no one drains the port out
        if self.debug:
            msglog.log('omnimeter', msglog.types.INFO, 
                       'wait time and numretries are %s'
                       % str((wait_time, numretries)))
        save_wait_time = wait_time
        last_exception = None
        self.lock.acquire()
        try:
            while numretries:
                try:
                    self.conn.drain()
                    wait_time = save_wait_time
                    t0 = time.time()
                    #loopback object
                    res = self.lh.send_request_with_response(request_obj, 
                                                             request_obj, 
                                                             wait_time)
                    if not res:
                        #should not happen
                        raise EWriteError()
                    wait_time = wait_time - (time.time() - t0)
                    if self.debug:
                        msglog.log('omnimeter', msglog.types.INFO, 
                                   'got loopback-resp:time left:%f' 
                                   % wait_time)
                    if wait_time < 0 :
                        #can never be
                        raise ETimeout() 
                    #wait until we get first byte of packet
                    res = 0    
                    while not res:
                        t0 = time.time()
                        res = self.lh.receive_response(self.stx_obj, wait_time)
                        wait_time = wait_time - (time.time() - t0)
                        if wait_time < 0 :
                            raise ETimeout()
                    if self.debug:
                        msglog.log('omnimeter', msglog.types.INFO, 
                                   'got first byte. wait time:%f' % wait_time)
                    res = self.lh.receive_response(response_obj, 
                                                   wait_time)
                    if not res:
                        raise EInvalidMessage()
                    return
                except Exception, e:
                    # remember the failure; retry until numretries is exhausted
                    last_exception = e
                    numretries -= 1
        finally:
            self.lock.release()
        if self.debug:
            msglog.log('omnimeter', msglog.types.INFO,
                       'Exhausted no. of retries. Raising last exception')
        raise last_exception
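As the send_request docstring describes, a device node hands the protocol a filled-in request object and an empty response object, optionally overriding the protocol-level timeout and retry defaults. A minimal sketch of such a call follows; proto is assumed to be a started OmniProto node, and ReadEnergyRequest/ReadEnergyResponse and the value() accessor are hypothetical packet classes standing in for the real omnimeter message objects.

def read_energy(proto):
    request = ReadEnergyRequest()      # hypothetical request packet
    response = ReadEnergyResponse()    # hypothetical response packet
    try:
        # Blocks until a valid reply is decoded into response, retrying up
        # to proto.retry_count times with proto.reply_timeout per attempt.
        proto.send_request(request, response)
    except ETimeout:
        msglog.log('omnimeter', msglog.types.WARN,
                   'meter did not answer within the retry budget')
        raise
    return response.value()            # hypothetical accessor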
Example #30
0
class LogAndExport(ServiceNode):
    def __init__(self):
        self._lock = Lock()
        self._started = 0
        self._alarm = []  # can have a whole MESS o' alarms at startup...
        self._scheduled = None
        self.trigger_node_url_posn = None  # allow source stamping
        self.trigger_node_msg_posn = None  # allow source stamping
        self._waiting_alarm_sid = None
        ServiceNode.__init__(self)

    def configure(self, config):
        ServiceNode.configure(self, config)
        set_attribute(self, 'log', REQUIRED, config)

    def configuration(self):
        config = ServiceNode.configuration(self)
        get_attribute(self, 'log', config, as_node_url)
        return config

    def start(self):
        self._lock.acquire()
        try:
            self.log = as_node(self.log)
            columns_node = self.log.get_child('columns')
            self.ts_position = columns_node.get_child('timestamp').position
            # Allow source stamping:
            if columns_node.has_child('trigger_node_url'):
                self.trigger_node_url_posn = columns_node.get_child(
                    'trigger_node_url').position
            if columns_node.has_child('trigger_node_msg'):
                self.trigger_node_msg_posn = columns_node.get_child(
                    'trigger_node_msg').position
            self._started = 1
        finally:
            self._lock.release()
        self.export_waiting_alarm()
        return ServiceNode.start(self)

    def stop(self):
        if not self._waiting_alarm_sid is None:
            try:
                scheduler.remove(self._waiting_alarm_sid)
            except:  # SID may already have expired and been removed...
                msglog.exception()
            self._waiting_alarm_sid = None
        self._started = 0
        ServiceNode.stop(self)
        return

    def export(self, alarm, attempt=0):
        self._lock.acquire()
        try:
            if (not self._started):
                self._alarm.append(alarm)
                # No need to set scheduler here; start() will call
                # export_waiting_alarm()...
                return
            # Even if this node is already started, do not attempt to
            # export alarm unless the linked log node and its collector
            # object are extant and started:
            if (self.log.collector is None):
                self._alarm.append(alarm)
                if (self._waiting_alarm_sid is
                        None):  # if we're not already scheduled, do it:
                    # Need to wait long enough for log.start() to finish creating
                    # and starting collector. ***GUESS*** 10.0 sec. Symptom of not
                    # waiting long enough: ENotStarted error raised below:
                    self._waiting_alarm_sid = scheduler.after(
                        10.0, self.export_waiting_alarm, ())
                return
        finally:
            self._lock.release()
        self.log.collector.pause()
        try:
            try:
                if not self.log.collector.running:
                    raise ENotStarted('Collector not started yet.')
                entry = self.log.collector.get_entry()
                entry[self.ts_position] = time.time()
                # Stamp source, if target log columns support it:
                if isinstance(self.trigger_node_url_posn, int):
                    entry[self.trigger_node_url_posn] = as_node_url(
                        alarm.source)
                if isinstance(self.trigger_node_msg_posn, int):
                    entry[self.trigger_node_msg_posn] = str(alarm)
                self.log.add_entry(entry)
                t = time.time()
                for child in self.log.get_child('exporters').children_nodes():
                    child.go(t)  # starts threads for long ops
            except:
                msglog.exception()
                if attempt > alarm.source.send_retries:
                    msglog.log('broadway', msglog.types.WARN,
                               'Export of alarm failed, aborting send.')
                    raise MpxException('Log and export failed.')
                else:
                    msglog.log('broadway', msglog.types.WARN,
                               'Log on alarm failed, delaying 1.0 sec.')
                    self._lock.acquire()
                    try:
                        if self._scheduled != None:
                            scheduler.cancel(self._scheduled)
                        self._scheduled = scheduler.after(
                            1, self.export, (alarm, attempt + 1))
                    finally:
                        self._lock.release()
        finally:
            self.log.collector.play()
        return

    ##
    # export_waiting_alarm: forces logging/export of self._alarm elements
    # if self.log.collector was None during export().
    # @todo May need to spin off thread(s), to avoid delaying Scheduler.
    #
    def export_waiting_alarm(self):
        if (self._started == 1) \
           and (not self.log.collector is None):
            if not self._waiting_alarm_sid is None:
                try:
                    scheduler.remove(self._waiting_alarm_sid)
                except:  # SID may already have expired and been removed...
                    msglog.exception()
                self._waiting_alarm_sid = None
            while len(self._alarm) > 0:
                init_len = len(self._alarm)
                alarm = self._alarm.pop(0)
                self.export(alarm)  # should leave alarm off of list...
                if init_len <= len(self._alarm):
                    break  # failed to keep alarm off the list
        elif (len(self._alarm) > 0):
            self._waiting_alarm_sid = scheduler.after(
                10.0, self.export_waiting_alarm, ())
        return
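Because export() queues the alarm and schedules export_waiting_alarm() whenever the node or the log's collector is not ready, callers can simply hand alarms off and let the retry logic absorb startup ordering. A minimal configuration/usage sketch follows; the node names, the log URL and the alarm object are placeholders, not values taken from a real configuration.

# Illustrative wiring only; names and URLs are placeholders.
exporter = LogAndExport()
exporter.configure({'name': 'log_and_export',
                    'parent': '/services/alarms',
                    'log': '/services/logs/alarm_log'})
exporter.start()

# Hand the alarm off.  If the log's collector is not up yet, the node queues
# the alarm and retries via export_waiting_alarm() about 10 seconds later.
exporter.export(alarm)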
Example #31
0
class DynDNSManager( ConfigurableNode ):
    def __init__( self ):
        ConfigurableNode.__init__( self )
        self.isRunning = 0
        self.ipcheck_pid = 0
        self._lock = Lock()
        self.thread = None
        self.first_time = 0
        self.log_name = 'broadway'
        self.update_count = 0
            
    def _is_debug( self ):
        if self.__dict__.has_key( 'debug' ):       
            if self.debug:
                return 1
        return 0

    def msglog( self, msg ):
        if self._is_debug():       
            msglog.log( self.log_name, msglog.types.DB, msg )
                
    def configure( self, config_dict ):     
        ConfigurableNode.configure( self, config_dict )
        set_attribute( self, 'enable', 0, config_dict, int )
        set_attribute( self, 'debug', 0, config_dict, int )
        
        set_attribute( self, 'ddns_service', 0, config_dict )
        set_attribute( self, 'ddns_acct', 0, config_dict )
        set_attribute( self, 'ddns_pswd', 0, config_dict )
        set_attribute( self, 'host_name', 0, config_dict )
        map_to_attribute( self, 'period', 0, config_dict, map_to_seconds )
        
    def configuration( self ):
        config_dict = ConfigurableNode.configuration( self )
        get_attribute( self, 'enable', config_dict )
        get_attribute( self, 'debug', config_dict )
        
        get_attribute( self, 'ddns_service', config_dict )
        get_attribute( self, 'ddns_acct', config_dict )
        get_attribute( self, 'ddns_pswd', config_dict )
        get_attribute( self, 'host_name', config_dict )
        map_from_attribute( self, 'period', config_dict, map_from_seconds )
        
        return config_dict

    def start( self ):
        if self.enable:
            msglog.log( self.log_name, msglog.types.INFO,
                        "STARTING %s, period = %d" % (self.name, self.period) )
            if not self.isRunning:
                self.isRunning = 1
                # Wait for a bit to give time for a possible PPP connection
                # to be brought up.
                scheduler.seconds_from_now_do( 90, self.go )
            else:
                raise EAlreadyRunning

    def stop( self ):
        if self.isRunning:
            if self.ipcheck_pid:
                os.kill( self.ipcheck_pid, signal.SIGKILL )
            self.isRunning = 0

    def go( self ):
        # Lock here
        self._lock.acquire()
        try:
            if self.thread and self.thread.isAlive():
                # Don't do it!
                return
            self.thread = Thread( name = self.name, target = self._complete, args = () )
            self.thread.start()
        finally:
            self._lock.release()
       
    def _complete( self ):
        try:
            self._ipcheck()
        except:
            msglog.exception()
            
        if self.isRunning and self.period:
            # Schedule another run in self.period seconds
            scheduler.seconds_from_now_do( self.period, self.go )

    def _ipcheck( self ):
        cmd = 'ipcheck -p ' + properties.ETC_DIR
        # Ignore errors the first time after startup.
        if self.update_count == 0:
            cmd += ' -e'
        if self._is_debug():
            cmd += ' -dv'
        cmd += ' %s %s %s' % (self.ddns_acct, self.ddns_pswd, self.host_name)

        # Start the ip checker
        self.msglog( "running %s" % cmd )
        child = Popen4( cmd )
        self.ipcheck_pid = child.pid
        outfile = child.fromchild
        
        # Wait for ip checker to finish.  Log any output in the message log.
        while 1:
            result = select( [outfile], [], [], 3.0 )
            if result[0]:
                lines = outfile.readlines()
                for line in lines:
                    self.msglog( line )
            status = child.poll()
            if not status == -1:
                break
        self.ipcheck_pid = 0
        
        if os.WIFEXITED( status ):
            exit_code = os.WEXITSTATUS( status )
            self.msglog( 'ipcheck exit status = %d' % exit_code )
        else:
            self.msglog( 'ipcheck forcibly stopped, status = %d' % status )
            
        self.update_count += 1
Example #32
0
class RO_CommandCache:
    class CommandION(CompositeNode):
        ##
        # @param rgvm A string representation of the Response Get Value Method.
        #        Example 'self.cached_response().engdt1_response().engine_rpm'.
        def __init__(self, cache, name, rgvm, args):
            CompositeNode.__init__(self)
            self.parent = cache.ion()
            self.name = name
            self._cache = cache
            file = 'mpx.ion.capstone.micro_turbine.personality.RO_CommandCache.CommandION.__init__'
            self._compiled_reference = compile(rgvm, file, 'eval')
            self._rgvm_args = args
            self.parent._add_child(self)
        def cached_response(self):
            return self._cache.cached_response()
        def get(self, skipCache=0):
            cache = self._cache
            cache.lock()
            try:
                response, cached = cache._response()
                value = apply(eval(self._compiled_reference), self._rgvm_args)
            finally:
                cache.unlock()
            return value
        def get_result(self, skipCache=0, **keywords):
            cache = self._cache
            cache.lock()
            try:
                response, cached = cache._response()
                result = Result()
                result.timestamp = cache.timestamp()
                result.cached = cached
                result.value = apply(eval(self._compiled_reference), self._rgvm_args)
            finally:
                cache.unlock()
            return result
    def __init__(self, parent, line_handler, command, timeout=1.0):
        self._ion = parent
        self._lh = line_handler
        self._command = command
        self._timeout = timeout
        self._cache = None
        self._timestamp = time.time()
        self._expire_after = self._timestamp - 1.0
        self._lock = Lock() # Light weight, non-reentrant lock.
        self._map = {}
    ##
    # @return The time that the cache was last refreshed.
    def timestamp(self):
        return self._timestamp
    ##
    # @return The ion that is considered the parent of all cached values.
    def ion(self):
        return self._ion
    ##
    # @return True if the cache is valid.
    def is_valid(self):
        return self._cache and self._expire_after >= time.time()
    ##
    # @return True if the cache is dirty (not valid).
    def is_dirty(self):
        return not self.is_valid()
    ##
    # Mark the cache as dirty, forcing a refresh on the next read.
    def mark_dirty(self):
        self._cache = None
    ##
    # Lock the cache for exclusive access.
    def lock(self):
        self._lock.acquire()
    ##
    # Unlock the cache from exclusive access mode.
    def unlock(self):
        self._lock.release()
    ##
    # Return the cached response, if any.
    def cached_response(self,skipCache=0):
        return self._cache
    ##
    # Refresh the cache from the command.
    # @note Should only be invoked locked.
    def _refresh(self):
        self._cache = self._lh.command(self._command)
        self._timestamp = time.time()
        self._expire_after = self._timestamp + self._timeout
    ##
    # Return the cached response, ensuring it is minty fresh.
    # @note Should only be invoked locked.
    def _response(self,skipCache=0):
        cached = 1
        if skipCache or not self.is_valid():
            self._refresh()
            cached = 0
        return self._cache, cached
    ##
    # @param rgvm A string representation of the Response Get Value Method. 
    def map_child(self, name, rgvm, args=()):
        cache = self
        child = self.CommandION(cache, name, rgvm, args)
        self._map[name] = child
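As the CommandION doc comment above shows, each child is driven by an rgvm expression evaluated against the shared cached response, so a single line-handler command can back many child IONs with one refresh per timeout window. A minimal wiring sketch follows; parent_ion, line_handler, the 'ENGDT1' command string and the engine_temp path are assumptions standing in for the real Capstone personality values (the engine_rpm rgvm is the one quoted in the doc comment).

# Illustrative only: the parent ION, line handler and command are placeholders.
cache = RO_CommandCache(parent_ion, line_handler, 'ENGDT1', timeout=1.0)

# Children share one cached response; a get() within 1.0 second of the last
# refresh is served from the cache, otherwise _refresh() reissues the command
# while the cache lock is held.
cache.map_child('engine_rpm',
                'self.cached_response().engdt1_response().engine_rpm')
cache.map_child('engine_temp',
                'self.cached_response().engdt1_response().engine_temp')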
Example #33
0
class UserManager(ServiceNode):
    class _NoneUser:
        def __init__(self):
            self.__name = 'NoneUser'

        def name(self):
            return self.__name

    def __init__(self):
        ##
        # Used to control access to the user control dictionaries.
        self.__lock = Lock()
        ##
        # There is only ever one instance of a User cached in memory.
        self.__users = {'NoneUser': self._NoneUser()}
        ##
        # For every derived CacheableAuthenticator that has been used
        # to authenticate a User instance, there is one cached
        # authenticator instance.
        self.__pending = {DigestRFC2617Authenticator: []}
        ServiceNode.__init__(self)
        return

    def has_user(self, name):
        users = self.__users
        if not users.has_key(name):
            try:
                user = User(name, 0, PASSWD_FILE)
                user._authenticators = {}
                users[name] = user
            except Exception:
                msglog.log("broadway", msglog.types.WARN,
                           "User '%s' not found" % name)
                return False
        return users.has_key(name)

    def get_user(self, name):
        users = self.__users
        if not users.has_key(name):
            user = User(name, 0, PASSWD_FILE)
            user._authenticators = {}
            users[name] = user
        return users[name]

    def remove_user(self, name):
        users = self.__users
        if users.has_key(name):
            del users[name]

    def new_user(self, name):
        raise ENotImplemented(self.new_user)

    def initialize_authenticator(self, authenticator, **keywords):
        name = authenticator.name(**keywords)
        file = PASSWD_FILE
        if keywords.has_key('_file_'):
            file = keywords['_file_']
        users = self.__users
        self.__lock.acquire()
        try:
            if not users.has_key(name):
                try:
                    user = User(name, 0, file)
                except EInvalidValue:
                    raise EAuthenticationFailed()
                user._authenticators = {}
                users[name] = user
                validator = authenticator(users[name], **keywords)
            else:
                user = users[name]
                current_key = authenticator.current_key(**keywords)
                validator = authenticator(user, **keywords)
            validator.validate(**keywords)
            next_key = validator.next_key()
            users[name]._authenticators[next_key] = validator
        finally:
            self.__lock.release()
        return validator

    def user_from_authenticator(self, authenticator, **keywords):
        validator = self.initialize_authenticator(authenticator, **keywords)
        return validator.user

    def user_from_pam(self, name, password, **keywords):
        return self.user_from_authenticator(PAMAuthenticator,
                                            name=name,
                                            passwd=password,
                                            **keywords)

    ##
    # Authenticate a user via the system user name and password.
    #
    # @param username The system username to authenticate.
    # @param password The clear text password of the user.
    # @return A User object representing the authenticated user.
    # @exception EAuthenticationFailed
    def user_from_cleartext(self, name, password, **keywords):
        return self.user_from_authenticator(ClearTextAuthenticator,
                                            name=name,
                                            passwd=password,
                                            **keywords)

    def validator_from_cleartext(self, name, password, **keywords):
        return self.initialize_authenticator(ClearTextAuthenticator,
                                             name=name,
                                             passwd=password,
                                             **keywords)

    ##
    # Authenticate a user via the system user name and already crypted
    # password.
    #
    # @param username The system username to authenticate.
    # @param password The password of the user, crypted.
    # @return A User object representing the authenticated user.
    # @exception EAuthenticationFailed
    def user_from_crypt(self, name, crypted_password, **keywords):
        return self.user_from_authenticator(CryptAuthenticator,
                                            name=name,
                                            crypt=crypted_password,
                                            **keywords)

    def validator_from_crypt(self, name, crypted_password, **keywords):
        return self.initialize_authenticator(CryptAuthenticator,
                                             name=name,
                                             crypt=crypted_password,
                                             **keywords)

    ##
    #
    # @return A User object representing the authenticated user.
    # @exception EAuthenticationFailed
    def user_from_rfc2617_basic(self, credentials, **keywords):
        return self.user_from_authenticator(BasicRFC2617Authenticator,
                                            credentials=credentials,
                                            **keywords)

    def validator_from_rfc2617_basic(self, credential, **keywords):
        return self.initialize_authenticator(BasicRFC2617Authenticator,
                                             credentials=credential,
                                             **keywords)

    def new_rfc2617_basic_user(self, **keywords):
        return self.__users['NoneUser']

    ##
    #
    # @return A User object representing the authenticated user.
    # @exception EAuthenticationFailed
    def user_from_rfc2617_digest(self, **keywords):
        return self.validator_from_rfc2617_digest(**keywords).user

    def validator_from_rfc2617_digest(self, **keywords):
        authenticator = DigestRFC2617Authenticator
        file = PASSWD_FILE
        if keywords.has_key('_file_'):
            file = keywords['_file_']
        name = authenticator.name(**keywords)
        nonce = authenticator.current_id(**keywords)
        current_key = authenticator.current_key(**keywords)
        self.__lock.acquire()
        try:
            if not self.__users.has_key(name):
                user = User(name, 0, file)
                user._authenticators = {}
            else:
                user = self.__users[name]
            authenticators = user._authenticators
            if not authenticators.has_key(current_key):
                pending = self.__pending[authenticator]
                if nonce in pending:
                    pending.remove(nonce)
                    authenticators[current_key] = authenticator(
                        user, **keywords)
                    authenticators[current_key].initiate_nonce(nonce)
                else:
                    raise EAuthenticationFailed('Unknown nonce value.')
            self.__users[name] = user
        finally:
            self.__lock.release()
        return self.initialize_authenticator(authenticator, **keywords)

    def new_rfc2617_digest_user(self, next_nonce):
        self.__pending[DigestRFC2617Authenticator].append(next_nonce)
        return self.__users['NoneUser']
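All of the user_from_* helpers above funnel through initialize_authenticator(), which keeps one cached User per name and one validator per authenticator key, raising EAuthenticationFailed when validation fails. A minimal sketch of how request-handling code might call the clear-text and RFC 2617 basic entry points; user_manager, the function names and the credential handling are illustrative, not part of the service.

def login_with_password(user_manager, name, password):
    # Clear-text check against the system password file.
    try:
        return user_manager.user_from_cleartext(name, password)
    except EAuthenticationFailed:
        return None

def login_with_basic_header(user_manager, credentials):
    # credentials is assumed to be taken from an 'Authorization: Basic ...'
    # header, in whatever form BasicRFC2617Authenticator expects.
    return user_manager.user_from_rfc2617_basic(credentials)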
Example #34
0
class ActiveProxyAbstractClass(EventConsumerMixin):
    def __init__(self):
        self._link = None  #get 'actual' node
        self.link = None
        self._proxy_get = None  #set to actual's preferred get method
        self._proxy_set = None
        self._proxy_start_exception = None
        self._proxy_sid = None
        self.proxy_direction = GET_ONLY  #direction subscription "pushes" the data
        self._proxy_active_source = None
        self._proxy_active_destination = None
        self._proxy_active_lock = Lock()
        self._proxy_active_thread_lock = Lock()
        self._proxy_active_event = None
        self._proxy_trigger_counter = 0
        self._retry_win_high = 30
        EventConsumerMixin.__init__(self, self.change_of_value)
        self.debug = debug

    def configure(self, cd):
        set_attribute(self, 'link', None, cd, str)
        set_attribute(self, 'error_response', '%ERROR%', cd,
                      str)  #keywords: %ERROR% %NONE% or desired value
        set_attribute(self, 'proxy_direction', self.proxy_direction, cd, int)

    def configuration(self, cd=None):
        if cd is None:
            cd = {}
        get_attribute(self, 'link', cd, str)
        get_attribute(self, 'error_response', cd, str)
        get_attribute(self, 'proxy_direction', cd, str)
        return cd

    ##
    def start(self):
        self._proxy_start_exception = None
        if self.is_proxy():
            self._proxy_start_active_mode()

    def stop(self):
        if self._proxy_sid is not None:
            SM.destroy(self._proxy_sid)
            self._proxy_sid = None
        return

    ##
    # start up subscription service if we are in active mode
    # keep trying until we are successful
    # todo: thread safe?
    def _proxy_start_active_mode(self):
        if self.link:
            try:
                if self._proxy_sid is None:  #have not started subscription service yet
                    if self.proxy_direction == GET_ONLY:
                        self._proxy_active_source = self._proxy_linked_node()
                        if self._proxy_active_source is None:
                            raise ENotStarted()
                        self._proxy_active_destination = self
                    else:  #SET_ONLY
                        self._proxy_active_source = self
                        self._proxy_active_destination = self._proxy_linked_node()
                        if self._proxy_active_destination is None:
                            raise ENotStarted()
                    self._proxy_active_queue = Queue()
                    self._proxy_sid = SM.create_delivered(
                        self, {1: self._proxy_active_source})
                    if self.debug:
                        print 'Active proxy %s started successfully' % (
                            self.name)
            except:
                #it didn't work.  Setup schedule to try again in x seconds.
                if self._retry_win_high < 90:
                    self._retry_win_high += 1
                retry_in = randint(int(self._retry_win_high * .66),
                                   self._retry_win_high)
                scheduler.seconds_from_now_do(retry_in,
                                              self._proxy_start_active_mode)
                #raise  #took this out since it mostly just served to force the scheduler thread to restart
                if self.debug: msglog.exception()

    def change_of_value(self, event):
        #print 'proxy change of value event'
        self._last_event = event
        #if not isinstance(event, ChangeOfValueEvent):
        #return
        # Insert event into queue, and (automatically) notify _event_handler_thread:
        trigger = 0
        self._proxy_active_lock.acquire()
        try:
            self._proxy_active_event = event  # save only latest event, throw away older values
            if self._proxy_trigger_counter < 3:  # no point in triggering too many times for noisy inputs
                self._proxy_trigger_counter += 1
                trigger = 1
        finally:
            #could this line go below the next to reduce triggers to thread?
            self._proxy_active_lock.release()
        if trigger:
            #print 'proxy trigger'
            self._proxy_trigger_queue()

    def _proxy_trigger_queue(self):
        #print 'proxy triggerED'
        # run the set function on a thread pool thread:
        NORMAL.queue_noresult(self.proxy_active_set, self)
        return

    def proxy_active_set(self, dummy):
        #print 'active proxy event'
        self._proxy_active_thread_lock.acquire()  # only one set at a time is active
        try:
            try:
                event = None
                self._proxy_active_lock.acquire()
                try:
                    event = self._proxy_active_event
                    self._proxy_active_event = None
                    if self._proxy_trigger_counter:
                        self._proxy_trigger_counter -= 1
                finally:
                    self._proxy_active_lock.release()  # allow any new covs while we do the set
                if event:  #pending event
                    if self.debug:
                        print str(event)
                    value = event.results()[1]['value']
                    if isinstance(value, Exception):
                        raise value
                    try:  #to set() value on destination node
                        self._proxy_active_destination.set(value)  # don't know how long this will take
                        self._proxy_set_exception(None)
                    #failure in attempt to set data, maybe node is not ready yet, try again later
                    except (ETimeout, EConnectionError, ENotStarted,
                            ENoSuchNode):
                        #put the event back in the active event if no new one has come in while we were trying to set()
                        self._proxy_active_lock.acquire()
                        try:
                            if self._proxy_active_event is None:
                                self._proxy_active_event = event  #put it back in for next attempt unless a new one came in
                        finally:
                            self._proxy_active_lock.release()  # allow any new covs while we do the set
                        scheduler.seconds_from_now_do(
                            60, self._proxy_trigger_queue)  # try again in one minute
                        raise  #re-raise the set() exception
                    except:
                        raise
                    if self.debug: print 'proxy_active_set call set returned'
            except Exception, e:
                try:
                    self._proxy_set_exception(e)
                    # we have squashed the exception
                    # we want to log exceptions that are potential bugs
                    # but we don't want to fill the msglog with ETimeouts
                    if not isinstance(e, ETimeout):
                        msglog.exception()
                except:
                    # if there is a bug in the set_exception method we want
                    # to see this otherwise it makes debugging difficult
                    msglog.exception()
        finally:
            self._proxy_active_thread_lock.release()
        if self.debug: print 'proxy_active_set done'

    def is_proxy(self):
        if self.link:
            return 1
        return 0

    def linked_node_has_set(self):
        return hasattr(self._proxy_linked_node(), 'set')

    ##
    # Lazily discovers reference to linked node
    #
    def _proxy_linked_node(self):
        if self._link is None:
            self._link = as_node(self.link)
        return self._link

    ##
    # If the subclass supports the 'set_exception' interface, keep it updated
    #
    def _proxy_set_exception(self, e):
        if hasattr(self, 'set_exception'):
            self.set_exception(e)
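ActiveProxyAbstractClass is a mixin: a concrete point class combines it with its normal node base class, forwards configure/start/stop to both, and supplies the link and proxy_direction attributes that drive the subscription retry loop above. A minimal sketch of such a subclass follows; ProxiedPoint and the configuration values are illustrative, not an existing node type.

class ProxiedPoint(CompositeNode, ActiveProxyAbstractClass):
    def __init__(self):
        CompositeNode.__init__(self)
        ActiveProxyAbstractClass.__init__(self)
    def configure(self, cd):
        CompositeNode.configure(self, cd)
        ActiveProxyAbstractClass.configure(self, cd)
    def configuration(self):
        cd = CompositeNode.configuration(self)
        return ActiveProxyAbstractClass.configuration(self, cd)
    def start(self):
        CompositeNode.start(self)
        ActiveProxyAbstractClass.start(self)
    def stop(self):
        ActiveProxyAbstractClass.stop(self)
        CompositeNode.stop(self)

# Example configuration handed to the framework (values are placeholders):
# {'name': 'space_temp', 'parent': '/services/proxies',
#  'link': '/services/network/space_temp', 'proxy_direction': GET_ONLY}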
Example #35
0
class DeviceProperty(CompositeNode, EventProducerMixin, UpdateMixin):
    _release_cmd_base = ''
    _ovrd_cmd_base = ''

    def __init__(self):
        # overridden by subclasses
        self._pv_index = None
        self._last_rcvd = 0.0
        self._last_rcvd_dlta = 0.0
        self._cached_value = None
        self._cached_result = None
        self._prop_values = None
        self._subscription_lock = Lock()
        CompositeNode.__init__(self)
        EventProducerMixin.__init__(self)
        return

    def configure(self, cd):
        super(DeviceProperty, self).configure(cd)
        set_attribute(self, 'swid', REQUIRED, cd)
        set_attribute(self, 'prop_type', REQUIRED, cd)
        set_attribute(self, 'prop_name', self.name, cd)
        set_attribute(self, 'node_root', 'nodeDump', cd)
        set_attribute(self, 'value_key', 'presentValue', cd)
        set_attribute(self, 'bundle', 1, cd, int)
        set_attribute(self, 'ttl', 60, cd, int)
        set_attribute(self, 'read_only', 1, cd, int)
        return

    def configuration(self):
        cd = super(DeviceProperty, self).configuration()
        get_attribute(self, 'swid', cd)
        get_attribute(self, 'prop_type', cd)
        get_attribute(self, 'prop_name', cd)
        get_attribute(self, 'node_root', cd)
        get_attribute(self, 'value_key', cd)
        get_attribute(self, 'bundle', cd)
        get_attribute(self, 'ttl', cd)
        get_attribute(self, 'read_only', cd)
        return cd

    def start(self):
        for prop_name in PROPERTIES.get(self.prop_type, ()):
            if prop_name == self.prop_name:
                # this property will be returned via a get()
                # to this node
                continue
            p = Prop()
            cd = {'name': prop_name, 'parent': self}
            p.configure(cd)
            p.start()
        if self.node_root == 'nodeDump' and self.value_key == 'presentValue':
            setattr(self, 'decode', self._decode_fast)
        else:
            setattr(self, 'decode', self._decode_slow)
        self.url = BASE_URL % (self.station.host, self.swid)
        self._rqst = JaceRequest(self.url, ttl=self.ttl)
        self._configure_set()
        super(DeviceProperty, self).start()
        return

    def _configure_set(self):
        if self.read_only:
            return
        self._ovrd_cmd = self._ovrd_cmd_base % (self.station.host, self.swid)
        self._release_cmd = self._release_cmd_base % (self.station.host,
                                                      self.swid)
        setattr(self, 'set', self._set)
        return

    def get_property_value(self, prop_name):
        if self._prop_values is None:
            self._load_property_value()
        if self._prop_values is None:
            # the station request failed; treat the property as unknown
            return ''
        return self._prop_values.get(prop_name, '')

    def _load_property_value(self):
        rsp = self.station.add_request(JaceRequest(self.url))
        if isinstance(rsp, Exception):
            return
        self._prop_values = {}
        if not rsp.is_complete():
            rsp.await_completion()
        data = rsp.read()
        if data.startswith('<!--'):
            data = data[(data[1:].find('<') + 1):]
        data_o = self._decode_slow(data)
        for prop in self.children_nodes():
            if not isinstance(prop, Prop):
                continue
            try:
                value = fix_up_value(
                    getattr(data_o, prop.name).get_tag_value())
            except:
                value = ''
            self._prop_values[prop.name] = value
        return

    def _decode_fast(self, data):
        value = None
        data = data.split('\n')
        if self._pv_index is None:
            cnt = 0
            for l in data:
                if l.count('presentValue'):
                    self._pv_index = cnt
                    break
                cnt += 1
        else:
            try:
                l = data[self._pv_index]
            except:
                # data was split into lines above; rejoin it for the slow parse
                value = self._decode_slow('\n'.join(data))
                if value:
                    value = fix_up_value(value)
                return value
        if l.count('presentValue') == 0:
            value = self._decode_slow('\n'.join(data))
        else:
            try:
                value = l.split('>')[1].split('<')[0]
            except:
                value = self._decode_slow('\n'.join(data))
        if value:
            value = fix_up_value(value)
        return value

    def _decode_slow(self, data):
        try:
            data_o = xml2code(data)
        except:
            data_o = None
        return data_o

    def event_subscribe(self, *args):
        self._subscription_lock.acquire()
        try:
            already_subscribed = self.event_has_subscribers()
            EventProducerMixin.event_subscribe(self, *args)
            if self.parent.can_bundle() and self.bundle:
                self.parent.subscribe(self.prop_name, self.update_cache)
            elif not already_subscribed:
                self.update_continuous(None)
                if self._cached_result and \
                    (uptime.secs() - self._cached_result.timestamp) < self.ttl:
                    self._trigger_cov(self._cached_result.value,
                                      self._cached_result.value, time.time())
        finally:
            self._subscription_lock.release()
        return

    def event_unsubscribe(self, *args):
        self._subscription_lock.acquire()
        try:
            EventProducerMixin.event_unsubscribe(self, *args)
            if self.parent.can_bundle() and self.bundle:
                self.parent.unsubscribe(self.prop_name)
        finally:
            self._subscription_lock.release()
        return

    def _trigger_cov(self, old_value, new_value, t):
        cov = ChangeOfValueEvent(self, old_value, new_value, t)
        self.event_generate(cov)
        return

    def get(self, skipCache=0):
        v = self.get_result(skipCache).value
        if isinstance(v, Exception):
            raise ETimeout
        return v

    def get_result(self, skipCache=0):
        dt = uptime.secs() - self._last_rcvd
        if dt > self.ttl or self._cached_value is None:
            # data is stale
            self.update()  # blocks
        return self._cached_result

    def update_cache(self, value):
        now = uptime.secs()
        if isinstance(value, ValueObj):
            value = value.get(self.prop_name)
        if value is None or isinstance(value, Exception):
            value = ETimeout()
        if value != self._cached_value:
            if self.event_has_subscribers():
                self._trigger_cov(self._cached_value, value, time.time())
            self._cached_value = value
        if self._cached_result is None:
            changes = 0
        else:
            changes = self._cached_result.changes + 1
        self._cached_result = Result(self._cached_value, self._last_rcvd,
                                     changes)
        self._last_rcvd = now
        return

    def has_cov(self):
        return 1

    def _set(self, value):
        if value in (None, 'None'):
            url = self._release_cmd
        else:
            url = self._ovrd_cmd + str(value)
        self.station.add_request(JaceRequest(url))
        return

    def _get_station(self):
        return self.parent.parent

    station = property(_get_station)
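# A minimal, standalone sketch (hypothetical names, not part of the original
# module) of the ttl-based caching that DeviceProperty.get_result() relies on:
# a Result records the value, when it was received, and a change counter, and
# a new fetch is only issued when the cached copy is older than ttl.  Here
# time.time() stands in for uptime.secs() and _fetch for the JACE request.
import time
from collections import namedtuple

Result = namedtuple('Result', 'value timestamp changes')

class CachedProperty(object):
    def __init__(self, fetch, ttl=60):
        self._fetch = fetch              # callable returning a fresh value
        self.ttl = ttl
        self._cached_result = None

    def get_result(self):
        now = time.time()
        stale = (self._cached_result is None or
                 (now - self._cached_result.timestamp) > self.ttl)
        if stale:
            value = self._fetch()
            if self._cached_result is None:
                changes = 0
            else:
                changes = self._cached_result.changes + 1
            self._cached_result = Result(value, now, changes)
        return self._cached_result

    def get(self):
        return self.get_result().value

prop = CachedProperty(lambda: 42, ttl=60)
print(prop.get())   # first call fetches; calls within the next 60s hit the cache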
Example #36
0
    class __impl(ImmortalThread):
        tm_counter = Counter(0)

        def __init__(self, timeout=2.0):
            self.timeout = timeout
            self.stations = {}
            self._monitor = monitor.ChannelMonitor(self.timeout)
            self.tm_number = self.tm_counter.increment()
            self._response_tp = ThreadPool(1, 'Jace Response Pool')
            self._pending_responses = Queue()
            self._callbacks = {}
            self._running = False
            self._sync_get_lock = Lock()
            self._last_sync_get = uptime.secs()
            self._cv = Condition()
            ImmortalThread.__init__(self, None, None,
                                    'Jace Transaction Manager')
            return

        def start(self):
            if not self._monitor.is_running():
                self._monitor.start_monitor()
            self._running = True
            self._synchronous_transaction = Transaction(
                self, None, self._bump_cv)
            self._synchronous_transaction.set_timeout(self.timeout)
            ImmortalThread.start(self)
            return

        def stop(self):
            msglog.log('Jace', INFO, 'Stop Jace Prism Transaction Manager')
            if self._monitor.is_running():
                self._monitor.stop_monitor()
            self._running = False
            return

        def run(self):
            msglog.log('Jace', INFO, 'Starting Jace Prism Transaction Manager.')
            while self._running:
                try:
                    self.send_loop()
                    self.response_loop()
                except:
                    msglog.log(
                        'Jace', WARN,
                        'Jace Transaction Manager - error sending next.')
                    msglog.exception()
            return

        def transaction_completion_handler(self, transaction):
            self.tm_number = self.tm_counter.increment()
            s_id = None
            try:
                tid = transaction.tid
                s_id, callback = self._callbacks.get(tid, (None, None))
                if callback:
                    del self._callbacks[tid]
                    self._pending_responses.put(
                        (callback, transaction.get_response()))
            except:
                msglog.exception()
            # recycle the transaction for reuse within the station's queue
            if s_id is not None:
                self.stations.get(s_id).put_transaction(transaction)
            return

        def add_station(self, station):
            s_id = station.get_id()
            self.stations[s_id] = station
            return

        def get_synchronous(self, station, rqst):
            self._sync_get_lock.acquire()
            try:
                t = self._synchronous_transaction
                hdr = self._get_auth_header(station)
                hdr['Connection'] = 'close'
                t.build_request(rqst.url, None, hdr)
                self._cv.acquire()
                try:
                    response = ETimeout()
                    try:
                        t.send_request()
                        self._cv.wait(self.timeout)
                        self._last_sync_get = uptime.secs()
                        if t.is_expired():
                            t.cancel()
                        else:
                            response = t.get_response()
                    except:
                        t.cancel()
                finally:
                    self._cv.release()
                return response
            finally:
                self._sync_get_lock.release()
            return

        def _bump_cv(self, transaction):
            # transaction isn't used
            self._cv.acquire()
            self._cv.notify()
            self._cv.release()
            return

        def send_loop(self):
            for s_id, station in self.stations.items():
                for i in range(station.transaction_limit):
                    try:
                        t, rqst = station.get_next()
                    except Empty:
                        break
                    hdr = self._get_auth_header(station)
                    hdr['Connection'] = 'close'
                    t.build_request(rqst.url, None, hdr)
                    self._callbacks[t.tid] = (s_id, rqst.callback)
                    t.send_request()
            return

        def response_loop(self):
            while 1:
                try:
                    callback, rsp = self._pending_responses.get(False)
                    callback(rsp)
                except Empty:
                    return
                except:
                    msglog.log('Jace', WARN,
                               'Unexpected error in response_loop')
                    msglog.exception()
            return

        def _get_auth_header(self, station):
            return {"Authorization": "Basic %s" % station.base64string}
Example #37
0
class TestCase(DefaultTestFixture, EventConsumerMixin):
    ID1 = "/services/time/UTC/second"
    ID2 = "/services/time/local"
    nrt1to2 = {
        ID1: ID1,
        ID2: ID2,
    }
    ID3 = "/services/time/UTC/milliseconds"
    ID4 = "/services/time/local/minute"
    nrt3to4 = {
        ID3: ID3,
        ID4: ID4,
    }
    nrt1to4 = {}
    nrt1to4.update(nrt1to2)
    nrt1to4.update(nrt3to4)
    ID5 = "/services/time/local/day"
    nrtB10 = {}
    for i in range(0, 10):
        url = "/BatchNode-%03d" % i
        nrtB10[url] = url
    del url

    def __init__(self, *args, **kw):
        DefaultTestFixture.__init__(self, *args, **kw)
        EventConsumerMixin.__init__(self, self.change_of_value)
        self.__event_lock = Lock()
        self.__event_updated_values = {}
        self._cov_counter = 0
        return

    def change_of_value(self, event):
        self._cov_counter += 1
        results = event.results()
        self.__event_lock.acquire()
        try:
            self.__event_updated_values.update(results)
        finally:
            self.__event_lock.release()
        return

    def setUp(self):
        DefaultTestFixture.setUp(self)
        self.__event_updated_values = {}
        self.new_node_tree()
        root = as_internal_node('/')
        self._cov_counter = 0
        GetException().configure({'name': 'exception', 'parent': '/services'})
        SUBSCRIPTION_MANAGER.configure({
            'debug': 0,
            '_normal_pool_size': 2,
            '_slow_pool_size': 2,
            '_prime_pool_size': 2,
            '_minimum_poll_interval': 0.001,
            '_slow_poll_threshold': 0.500,
        })
        for i in range(0, 10):
            f = FastNode()
            f.configure({'parent': root, 'name': "FastNode-%03d" % i})
            s = SlowNode()
            s.configure({'parent': root, 'name': "SlowNode-%03d" % i})
            e = ErrorNode()
            e.configure({'parent': root, 'name': "ErrorNode-%03d" % i})
            b = BatchNode(i & 1)
            b.configure({'parent': root, 'name': "BatchNode-%03d" % i})
        root.start()
        return

    def tearDown(self):
        self.del_node_tree()
        DefaultTestFixture.tearDown(self)
        # self.dump_msglog()
        return

    def __values_changing(self, sid):
        r1 = SUBSCRIPTION_MANAGER.poll_all(sid)
        t1 = time.time()
        while 1:
            changed_values = SUBSCRIPTION_MANAGER.poll_changed(sid)
            if len(changed_values):
                return
            if (time.time() - t1) > 1.0:
                raise "Never got changes for any of the values."
            time.sleep(0.1)
        assert 0, "Can't reach here."

    def __all_plus_exceptions_check(self, all_values):
        no_such_node = all_values['/services/time/is/an/illusion']['value']
        assert isinstance(no_such_node, mpx.lib.exceptions.ENoSuchName), (
            "%r is not mpx.lib.exceptions.ENoSuchName" % no_such_node)
        get_exception = all_values['/services/exception']['value']
        assert get_exception.__class__ is Exception, (
            "%r is not an Exception" % get_exception)
        assert get_exception.args == ("GetException", ), ("%r is not %r" %
                                                          (get_exception.args,
                                                           ("GetException", )))
        return

    def test_create_polled(self):
        sid = SUBSCRIPTION_MANAGER.create_polled()
        return

    def test_create_delivered(self):
        sid = SUBSCRIPTION_MANAGER.create_delivered(self, self.nrt1to4)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to4:
            raise "Initial node reference table mismatch."
        time.sleep(0.1)
        t1 = time.time()
        while (time.time() - t1) < 1.0:
            self.__event_lock.acquire()
            try:
                if len(self.__event_updated_values) == 4:
                    # We got all 4 values!
                    return
            finally:
                self.__event_lock.release()
            time.sleep(0.1)
        if len(self.__event_updated_values) != 4:
            raise (("Never got changes for all four values, only %d.\n"
                    "Values: %r") % (len(self.__event_updated_values),
                                     self.__event_updated_values))

    def test_destroy(self):
        sids = []
        for i in range(2):
            # ID3 is /services/time/UTC/milliseconds which should
            # change "really fast."
            sid = SUBSCRIPTION_MANAGER.create_polled({self.ID3: self.ID3})
            # Make sure it comes up.
            t1 = time.time()
            self.__values_changing(sid)
            sids.append(sid)
        # Double check the values are changing.
        for sid in sids:
            self.__values_changing(sid)
        # Now nuke one of the subscriptions and see that the other stays valid.
        sid = sids.pop(0)
        SUBSCRIPTION_MANAGER.destroy(sid)
        try:
            SUBSCRIPTION_MANAGER.destroy(sid)
        except ENoSuchSubscription:
            pass
        else:
            raise "No such subscription not detected."
        # Make sure that the other subscription is valid.
        sid = sids.pop(0)
        self.__values_changing(sid)
        # Finally, make sure that the mnr is removed when the last snr is
        # deleted.
        if len(SUBSCRIPTION_MANAGER.diag_get_mnrs()) != 1:
            raise ("Bogus test, there should be 1 mnr at this point, not %r." %
                   len(SUBSCRIPTION_MANAGER.diag_get_mnrs()))
        SUBSCRIPTION_MANAGER.destroy(sid)
        if len(SUBSCRIPTION_MANAGER.diag_get_mnrs()) != 0:
            raise ("There should not be any mnrs at this point,"
                   " but there are %r." %
                   len(SUBSCRIPTION_MANAGER.diag_get_mnrs()))
        return

    def test_destroy_batch(self):
        sids = []
        for i in range(2):
            # BatchNodes change "really fast."
            sid = SUBSCRIPTION_MANAGER.create_polled(self.nrtB10)
            # Make sure it comes up.
            t1 = time.time()
            self.__values_changing(sid)
            sids.append(sid)
        # Double check the values are changing.
        for sid in sids:
            self.__values_changing(sid)
        # Now nuke one of the subscriptions and see that the other stays valid.
        sid = sids.pop(0)
        SUBSCRIPTION_MANAGER.destroy(sid)
        try:
            SUBSCRIPTION_MANAGER.destroy(sid)
        except ENoSuchSubscription:
            pass
        else:
            raise "No such subscription not detected."
        # Make sure that the other subscription is valid.
        sid = sids.pop(0)
        self.__values_changing(sid)
        if len(SUBSCRIPTION_MANAGER.diag_get_mnrs()) != 10:
            raise (
                "Bogus test, there should be 10 mnr at this point, not %r." %
                len(SUBSCRIPTION_MANAGER.diag_get_mnrs()))
        if len(SUBSCRIPTION_MANAGER.diag_get_mnbs()) != 1:
            raise ("Bogus test, there should be 1 mnb at this point, not %r." %
                   len(SUBSCRIPTION_MANAGER.diag_get_mnbs()))
        SUBSCRIPTION_MANAGER.destroy(sid)
        # Make sure that the mnr is removed when the last snr is deleted.
        if len(SUBSCRIPTION_MANAGER.diag_get_mnrs()) != 0:
            raise ("There should not be any mnrs at this point,"
                   " but there are %r." %
                   len(SUBSCRIPTION_MANAGER.diag_get_mnrs()))
        # Finally, make sure that the mnb is removed when the last mnr is
        # deleted.
        if len(SUBSCRIPTION_MANAGER.diag_get_mnbs()) != 0:
            raise ("There should not be any mnbs at this point,"
                   " but there are %r." %
                   len(SUBSCRIPTION_MANAGER.diag_get_mnbs()))
        return

    def test_node_reference_table(self):
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to2)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to2:
            raise "Node reference table mismatch."
        return

    def test_merge(self):
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to2)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to2:
            raise "Initial node reference table mismatch."
        SUBSCRIPTION_MANAGER.merge(sid, self.nrt3to4)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to4:
            raise "Node reference table mismatch."
        return

    def test_replace(self):
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to2)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to2:
            raise "Initial node reference table mismatch."
        SUBSCRIPTION_MANAGER.replace(sid, self.nrt3to4)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt3to4:
            raise "Replaced node reference table mismatch."
        return

    def test_empty(self):
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to4)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to4:
            raise "Initial node reference table mismatch."
        SUBSCRIPTION_MANAGER.empty(sid)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != {}:
            raise "Node reference table not empty."
        return

    def test_add(self):
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to2)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to2:
            raise "Initial node reference table mismatch."
        SUBSCRIPTION_MANAGER.add(sid, self.ID5, self.ID5)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        nrt125 = {}
        nrt125.update(self.nrt1to2)
        nrt125[self.ID5] = self.ID5
        if nrt != nrt125:
            raise "Node reference table mismatch."
        try:
            SUBSCRIPTION_MANAGER.add(sid, self.ID5, self.ID5)
        except ENodeIDExists:
            pass
        else:
            raise "Node ID in use not detected."
        return

    def test_add_and_get(self):
        st_time = time.time()
        rdict = SUBSCRIPTION_MANAGER.create_polled_and_get(self.nrt1to2)
        # Ensure we got back values for everything
        assert rdict['sid'] != None, "sid is not set in results dictionary."
        for x in rdict['values'].values():
            assert x != None, "Got None in values: %s." % str(rdict['values'])

    def test_modify(self):
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to2)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to2:
            raise "Initial node reference table mismatch."
        SUBSCRIPTION_MANAGER.modify(sid, self.ID2, self.ID3)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt == self.nrt1to2:
            raise "Modified node reference table not modified."
        if nrt != {self.ID1: self.ID1, self.ID2: self.ID3}:
            raise "Modified node reference table mismatch."
        try:
            SUBSCRIPTION_MANAGER.modify(sid, self.ID3, self.ID2)
        except ENoSuchNodeID:
            pass
        else:
            raise "No such NodeID not detected."
        return

    def test_remove(self):
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to4)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to4:
            raise "Initial node reference table mismatch."
        SUBSCRIPTION_MANAGER.remove(sid, self.ID2)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        nrt134 = {}
        nrt134.update(self.nrt1to4)
        del nrt134[self.ID2]
        if nrt != nrt134:
            raise "Node reference table mismatch."
        try:
            SUBSCRIPTION_MANAGER.remove(sid, self.ID2)
        except ENoSuchNodeID:
            pass
        else:
            raise "No such NodeID not detected."
        return

    def test_poll_all(self):
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to4)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to4:
            raise "Initial node reference table mismatch."
        # Check that each invocation gets all values.
        for i in range(0, 10):
            all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
            if len(all_values) != len(self.nrt1to4):
                # We did not get all 4 values!
                raise ("poll_all(self.nrt1to4) did not return all values."
                       " (%d out of %d)" %
                       (len(all_values), len(self.nrt1to4)))
        # Check that (eventually) all the values are result dictionaries.
        all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
        t1 = time.time()
        while (time.time() - t1) < 1.0:
            if None not in all_values.values():
                return
            time.sleep(0.1)
            all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
        if None in all_values.values():
            raise ("Never got changes for all four result dictionaries, %d." %
                   len(all_values))
        return

    def test_poll_all_plus_exceptions(self):
        SUBSCRIPTION_MANAGER._set_tunable_parameters({
            'minimum_poll_interval':
            0.0,
        })
        nrt1to4bad5to6 = {}
        nrt1to4bad5to6.update(self.nrt1to4)
        nrt1to4bad5to6['/services/time/is/an/illusion'] = (
            '/services/time/is/an/illusion')
        nrt1to4bad5to6['/services/exception'] = '/services/exception'
        sid = SUBSCRIPTION_MANAGER.create_polled(nrt1to4bad5to6)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != nrt1to4bad5to6:
            raise "Initial node reference table mismatch."
        # Check that each invocation gets all values.
        for i in range(0, 10):
            all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
            if len(all_values) != len(nrt1to4bad5to6):
                # We did not get all six values!
                raise ("poll_all(nrt1to4bad5to6) did not return all values."
                       " (%d out of %d)" %
                       (len(all_values), len(nrt1to4bad5to6)))
        # Check that (eventually) all the values are result dictionaries.
        all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
        t1 = time.time()
        while (time.time() - t1) < 1.0:
            if None not in all_values.values():
                self.__all_plus_exceptions_check(all_values)
                # Finally, test that a new subscription gets the correct
                # results.
                time.sleep(0.1)
                sid = SUBSCRIPTION_MANAGER.create_polled(nrt1to4bad5to6)
                time.sleep(0.1)
                all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
                self.__all_plus_exceptions_check(all_values)
                time.sleep(0.1)
                all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
                self.__all_plus_exceptions_check(all_values)
                return
            time.sleep(0.1)
            all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
        if None in all_values.values():
            raise ("Never got values for all nodes: %r." % all_values)
        return

    def test_poll_changed(self):
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to4)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to4:
            raise "Initial node reference table mismatch."
        all_values = {}
        time.sleep(0.1)
        t1 = time.time()
        while (time.time() - t1) < 1.0:
            time.sleep(0.1)
            changed_values = SUBSCRIPTION_MANAGER.poll_changed(sid)
            all_values.update(changed_values)
            if len(all_values) == 4:
                # We got all 4 values!
                return
        raise "Never got changes for all four values, %d." % len(all_values)
        return

    def test_fast_minimum_poll_interval(self):
        SUBSCRIPTION_MANAGER._set_tunable_parameters({
            'minimum_poll_interval':
            0.0,
        })
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to4)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to4:
            raise "Initial node reference table mismatch."
        # Check that each invocation gets all values.
        for i in range(0, 10):
            all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
            if len(all_values) != len(self.nrt1to4):
                # We did not get all 4 values!
                raise ("poll_all(self.nrt1to4) did not return all values."
                       " (%d out of %d)" %
                       (len(all_values), len(self.nrt1to4)))
        # Check that (eventually) all the values are result dictionaries.
        all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
        t1 = time.time()
        while (time.time() - t1) < 1.0:
            time.sleep(0.1)
            all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
        if None in all_values.values():
            raise (("Never got changes for all four result dictionaries, %d.\n"
                    "Values: %r") % (len(all_values), all_values))
        # ID3 is /services/time/UTC/milliseconds which should
        # change "really fast."
        c1 = all_values[self.ID3]['changes']
        time.sleep(1.0)
        all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
        c2 = all_values[self.ID3]['changes']
        if (c2 - c1) < 25:  # It's usually 500 on fearfactory...
            raise "%r only changed %d times in one second." % (
                self.ID3,
                (c2 - c1),
            )
        return

    def test_adjusted_minimum_poll_interval(self):
        SUBSCRIPTION_MANAGER._set_tunable_parameters({
            'minimum_poll_interval':
            0.2,
        })
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrt1to4)
        nrt = SUBSCRIPTION_MANAGER.node_reference_table(sid)
        if nrt != self.nrt1to4:
            raise "Initial node reference table mismatch."
        # Check that each invocation gets all values.
        for i in range(0, 10):
            all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
            if len(all_values) != len(self.nrt1to4):
                # We did not get all 4 values!
                raise ("poll_all(self.nrt1to4) did not return all values."
                       " (%d out of %d)" %
                       (len(all_values), len(self.nrt1to4)))
        # Check that (eventually) all the values are result dictionaries.
        t1 = time.time()
        while (time.time() - t1) < 1.0:
            all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
            if None not in all_values.values():
                # ID3 is /services/time/UTC/milliseconds which should
                # change "really fast."
                c1 = all_values[self.ID3]['changes']
                time.sleep(1.0)
                all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
                c2 = all_values[self.ID3]['changes']
                if (c2 - c1) > 6:  # 0.2 == Max 5/second.
                    raise ("1/5th second throttle failed,"
                           " %r changed %d times in one second.") % (
                               self.ID3,
                               (c2 - c1),
                           )
                return
            time.sleep(0.1)
        raise ("Never got changes for all four result dictionaries, %d." %
               len(all_values))
        return

    def test_polled_event_handling(self):
        event_maker = EventProducerTestClass()
        event_maker.configure({'name': 'EventProducerTester', 'parent': '/'})
        event_maker.start()
        sid = SUBSCRIPTION_MANAGER.create_polled({1: event_maker})

        # Wait for polling to start and verify value made it without any events
        t1 = time.time()
        all_values = {1: None}
        while all_values[1] is None:
            if (time.time() - t1) > 1.0:
                raise "Got tired of waiting..."
            all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
            time.sleep(0.1)
        # Check that subscription value is the initial value of 100
        if all_values[1]['value'] != 100:
            raise ("polled_event_handling did not return inital value: " +
                   str(all_values[1]['value']))
        # make a rapid series of changes to the node value
        for i in range(10):
            event_maker._cov_check(i)
            time.sleep(0.1)
        # check change count, should be approx 10
        all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
        change_count = all_values[1]['changes']
        if change_count < 8 or change_count > 12:
            raise ("polled_event_handling change count wrong."
                   "  Should be approx 10, not %d" % (change_count, ))
        # Check that the last value is correct.
        final_value = all_values[1]['value']
        if final_value != 9:
            raise ("polled_event_handling final value incorrect."
                   "  Should be 9, not %d" % (final_value, ))
        return

    def test_targeted_event_handling(self):
        event_maker = EventProducerTestClass()
        event_maker.configure({'name': 'EventProducerTester', 'parent': '/'})
        event_maker.start()
        nr = {1: event_maker}
        sid = SUBSCRIPTION_MANAGER.create_delivered(self, nr)

        # Wait for polling to start and verify value made it without any events
        t1 = time.time()
        while (time.time() - t1) < 1.0:
            all_values = SUBSCRIPTION_MANAGER.poll_all(sid)
            time.sleep(0.1)
        # Check that subscription value is the initial value of 100
        if all_values[1]['value'] != 100:
            raise ("polled_event_handling did not return inital value: " +
                   str(all_values[1]['value']))
        # make a rapid series of changes to the node value
        for i in range(10):
            event_maker._cov_check(i)
            time.sleep(0.1)
        # check change count, should be approx 10
        value_updates = self.__event_updated_values[1]['changes']
        cov_counts = self._cov_counter
        if value_updates < cov_counts:
            raise (
                "Targeted event handling event count did not match %d vs %d" %
                (value_updates, cov_counts))

    def test_timeout(self):
        sids = []
        for i in range(2):
            if not i:
                timeout = 1.0
            else:
                timeout = None
            # ID3 is /services/time/UTC/milliseconds which should
            # change "really fast."
            sid = SUBSCRIPTION_MANAGER.create_polled({self.ID3: self.ID3},
                                                     timeout)
            # Make sure it comes up.
            t1 = time.time()
            self.__values_changing(sid)
            sids.append(sid)
        # Double check the values are changing and that the subscriptions
        # stay valid while we poll for values.
        t1 = time.time()
        while (time.time() - t1) < 2.0:
            for sid in sids:
                self.__values_changing(sid)
            time.sleep(0.1)
        # Now ensure that sid[0] times out...
        sid = sids.pop(0)
        t1 = time.time()
        while sid in SUBSCRIPTION_MANAGER.diag_get_sids():
            if (time.time() - t1) > 2.0:
                raise "%r did not timeout." % sid
            time.sleep(0.1)
        # Finally, make sure that the other subscription is valid.
        sid = sids.pop(0)
        self.__values_changing(sid)
        return

    def test_timeout_batch(self):
        # nrtB10 changes "really fast."
        sid = SUBSCRIPTION_MANAGER.create_polled(self.nrtB10, 1.0)
        # Make sure it comes up.
        t1 = time.time()
        self.__values_changing(sid)
        # Double check the values are changing and that the subscriptions
        # stay valid while we poll for values.
        t1 = time.time()
        while (time.time() - t1) < 2.0:
            self.__values_changing(sid)
            time.sleep(0.1)
        if len(SUBSCRIPTION_MANAGER.diag_get_mnrs()) != 10:
            raise (
                "Bogus test, there should be 10 mnr at this point, not %r." %
                len(SUBSCRIPTION_MANAGER.diag_get_mnrs()))
        if len(SUBSCRIPTION_MANAGER.diag_get_mnbs()) != 1:
            raise ("Bogus test, there should be 1 mnb at this point, not %r." %
                   len(SUBSCRIPTION_MANAGER.diag_get_mnbs()))
        t1 = time.time()
        while sid in SUBSCRIPTION_MANAGER.diag_get_sids():
            if (time.time() - t1) > 2.0:
                raise "%r did not timeout." % sid
            time.sleep(0.1)
        # Make sure that the mnr is removed when the last snr is deleted.
        if len(SUBSCRIPTION_MANAGER.diag_get_mnrs()) != 0:
            raise ("There should not be any mnrs at this point,"
                   " but there are %r." %
                   len(SUBSCRIPTION_MANAGER.diag_get_mnrs()))
        # Finally, make sure that the mnb is removed when the last mnr is
        # deleted.
        if len(SUBSCRIPTION_MANAGER.diag_get_mnbs()) != 0:
            raise ("There should not be any mnbs at this point,"
                   " but there are %r." %
                   len(SUBSCRIPTION_MANAGER.diag_get_mnbs()))
        return

    #
    #
    #
    def _print_subscriptions(self):
        print ""
        print "*" * 60
        for s in SUBSCRIPTION_MANAGER.diag_get_subscriptions():
            print s
        print "*" * 60
        return

    def _print_sids(self):
        print ""
        print "*" * 60
        for s in SUBSCRIPTION_MANAGER.diag_get_sids():
            print s
        print "*" * 60
        return

    def _print_mnrs(self):
        print ""
        print "*" * 60
        for s in SUBSCRIPTION_MANAGER.diag_get_mnrs():
            print s
        print "*" * 60
        return

    def _print_mnbs(self):
        print ""
        print "*" * 60
        for s in SUBSCRIPTION_MANAGER.diag_get_mnbs():
            print s
        print "*" * 60
        return
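# The tests above repeat one idiom many times: poll a callable until its result
# satisfies some condition, or give up after a deadline.  A generic helper
# capturing that pattern is sketched below; wait_until() is illustrative only
# and is not part of the original fixture.
import time

def wait_until(poll, done, timeout=1.0, interval=0.1):
    """Call poll() every `interval` seconds until done(result) is true.
    Return the last result, or raise if the deadline passes first."""
    deadline = time.time() + timeout
    result = poll()
    while not done(result):
        if time.time() > deadline:
            raise AssertionError('timed out; last result: %r' % (result,))
        time.sleep(interval)
        result = poll()
    return result

# e.g. the "all four values populated" checks above could be written as:
#   all_values = wait_until(lambda: SUBSCRIPTION_MANAGER.poll_all(sid),
#                           lambda values: None not in values.values())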
Example #38
0
class Status(CompositeNode, EventProducerMixin):
    """
    Status is the base class used by different host management services.  Its
    primary role is to serve as an event producer and to interact with the
    NBMManager class in a consistent manner.  Classes derived from Status
    implement the _get_status method to determine (i.e., ping) the course of
    action that should be taken to interact with the remote NBM.
    """
    def __init__(self):
        self._last_rcvd = 0
        self._subscribers = 0
        self._scheduled = None
        self._skip_cache = False
        self._cached_result = None
        self._exec_delay = _Buffer(5)
        self._subscription_lock = Lock()
        CompositeNode.__init__(self)
        EventProducerMixin.__init__(self)
        
    def configure(self, cd):
        CompositeNode.configure(self, cd)
        set_attribute(self, 'ttl', 300, cd, int)
        
    def configuration(self):
        cd = CompositeNode.configuration(self)
        get_attribute(self, 'ttl', cd)
        return cd
    
    # _get_status must be implemented by derived classes
    def _get_status(self):
        raise ENotImplemented()
    
    def has_cov(self):
        return 1
        
    def event_has_subscribers(self):
        return bool(self._subscribers)
    
    def event_subscribe(self, *args):
        self._subscription_lock.acquire()
        try:
            already_subscribed = self.event_has_subscribers()
            result = EventProducerMixin.event_subscribe(self, *args)
            self._subscribers += 1
        finally:
            self._subscription_lock.release()
        if not already_subscribed and self._cached_result:
            value = self._cached_result.value
            self._trigger_cov(value, value, time.time())
        return result

    def event_unsubscribe(self, *args):
        self._subscription_lock.acquire()
        try:
            EventProducerMixin.event_unsubscribe(self, *args)
            self._subscribers = max(0, self._subscribers - 1)
        finally:
            self._subscription_lock.release()
       
    def skip_cache(self):
        # forces an update if there are consumers.
        self._cached_result = None    
    
    def refresh_value(self):
        status = self._get_status()
        self.update_value(status)
        return bool(status)
        
    def update_value(self, value):
        if self._cached_result:
            previous = self._cached_result.value
            if value == previous:
                return False
            changes = self._cached_result.changes + 1
        else:
            changes = 1
            previous = None
        self._cached_result = Result(value, uptime.secs(), 1, changes)
        self._trigger_cov(previous, value, time.time())
        return True
    
    def get(self, asyncok=True):
        return self.get_result().value
    
    def get_result(self, skipCache=0):
        if not self._cached_result or skipCache:
            self.refresh_value()
        return self._cached_result

    def _trigger_cov(self, old_value, new_value, timestamp=None):
        if not timestamp:
            timestamp = time.time()
        cov = ChangeOfValueEvent(self, old_value, new_value, timestamp)
        self.event_generate(cov)
        
    # compensate for the average delay in future scheduling considerations.
    def _get_exec_delay(self):
        avg_delay = 0
        for delay in self._exec_delay:
            avg_delay += delay
        if avg_delay:
            avg_delay = avg_delay / len(self._exec_delay)
        return avg_delay
    exec_delay = property(_get_exec_delay)
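# A standalone sketch (hypothetical names, not from the original package) of
# the contract the docstring describes: a derived class supplies _get_status()
# and the shared machinery turns its answer into cached results and change
# notifications.  The base class is reduced to the bare essentials here, and
# the subclass treats "up" as a successful TCP connect.
import socket
import time

class StatusBase(object):
    def __init__(self):
        self._cached = None               # (value, timestamp, changes)

    def _get_status(self):
        raise NotImplementedError('derived classes implement _get_status')

    def refresh_value(self):
        value = self._get_status()
        if self._cached is None or self._cached[0] != value:
            if self._cached is None:
                changes = 1
            else:
                changes = self._cached[2] + 1
            self._cached = (value, time.time(), changes)
        return bool(value)

class TcpReachableStatus(StatusBase):
    def __init__(self, host, port=80, timeout=2.0):
        StatusBase.__init__(self)
        self.host, self.port, self.timeout = host, port, timeout

    def _get_status(self):
        # "reachable" == a TCP connect succeeds within the timeout
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(self.timeout)
        try:
            s.connect((self.host, self.port))
            return 1
        except socket.error:
            return 0
        finally:
            s.close()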
Example #39
0
class Device(CompositeNode, UpdateMixin):
    def __init__(self):
        self._subscription_lock = Lock()
        self._subscribed = 0
        self._subscribers = {}
        self._last_value = None
        self._last_rcvd = None
        self._decode_indexes = {}
        return
    def configure(self, cd):
        super(Device, self).configure(cd)
        set_attribute(self, 'ttl', 60, cd, int)
        set_attribute(self, 'swid', '', cd)
        return
    def configuration(self):
        cd = super(Device, self).configuration()
        get_attribute(self, 'ttl', cd)
        get_attribute(self, 'swid', cd)
        return cd
    def start(self):
        if self.swid:
            self.url = BASE_URL % (self.station.host, self.swid)
            self._rqst = JaceRequest(self.url, ttl=self.ttl)
        super(Device, self).start()
        return
    def can_bundle(self):
        return bool(self.swid)
    def subscribe(self, name, func):
        self._subscription_lock.acquire()
        try:
            ##
            # if there are multiple external consumers, they are subscribed
            # via event producing child node.
            self._subscribers[name] = func
            self._subscribed += 1
            if self._last_value and (uptime.secs() - self._last_rcvd) < self.ttl:
                try:
                    value = self._last_value.get(name)
                    func(value)
                except:
                    pass
            if self._subscribed == 1:
                self.update_continuous(None)
        finally:
            self._subscription_lock.release()
        return
    def unsubscribe(self, name):
        self._subscription_lock.acquire()
        try:
            assert self._subscribed, 'Cannot decrement subscribers below 0'
            del self._subscribers[name]
            self._subscribed -= 1
        finally:
            self._subscription_lock.release()
        return
    def event_has_subscribers(self):
        return bool(self._subscribed)
    def _load_indexes(self, data):
        d_len = len(data)
        for name in self._subscribers.keys():
            index = offset = 0
            for l in data:
                if l.count('<'+name+'>'):
                    break
                index += 1
            for l in data[index:]:
                if l.count('<value>'):
                    break
                offset += 1
            if (index+offset) > d_len:
                index = offset = None
            self._decode_indexes[name] = (index, offset)
        return
    def _get_indexes(self, name):
        return self._decode_indexes.get(name, (None, None))
    def _have_indexes(self):
        indexes = self._decode_indexes.keys()
        for interest in self._subscribers.keys():
            if interest not in indexes:
                return False
        return True
    def decode(self, data_s):
        if data_s.startswith('<!--'):
            data_s = data_s[(data_s[1:].find('<')+1):]
        data = data_s.split('\n')
        if not self._have_indexes():
            self._load_indexes(data)
        values = {}
        for name in self._subscribers.keys():
            index,offset = self._get_indexes(name)
            try:
                if not data[index].count(name) or not data[index+offset].count('value'):
                    return self._decode_slow(data_s)
                l = data[index+offset]
                values[name] = l.split('>')[1].split('<')[0]
            except:
                return self._decode_slow(data_s)
        return values
    def _decode_slow(self, data):
        try:
            data_o = xml2code(data)
        except:
            data_o = None
        return data_o
    def update_cache(self, value_obj):
        for name, func in self._subscribers.items():
            value = value_obj.get(name)
            func(value)
        self._last_value = value_obj
        self._last_rcvd = uptime.secs()
        return
    def _get_station(self):
        return self.parent
    station = property(_get_station)
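# A standalone illustration (hypothetical sample data, not from the original
# module) of the fast-path decode used above: remember, per property name,
# which line of the XML dump holds its <value> element, pull values by line
# index on later polls, and fall back to a full parse when the cached index no
# longer matches the layout.
SAMPLE = """<nodeDump>
<spaceTemp>
<value>72.5</value>
</spaceTemp>
<fanStatus>
<value>On</value>
</fanStatus>
</nodeDump>"""

def load_indexes(lines, names):
    indexes = {}
    for name in names:
        for i, line in enumerate(lines):
            if ('<' + name + '>') in line:
                # the matching <value> line follows the property tag
                for j in range(i, len(lines)):
                    if '<value>' in lines[j]:
                        indexes[name] = j
                        break
                break
    return indexes

def fast_decode(data, names, indexes):
    lines = data.split('\n')
    values = {}
    for name in names:
        j = indexes.get(name)
        if j is None or j >= len(lines) or '<value>' not in lines[j]:
            return None        # layout changed: caller should re-parse slowly
        values[name] = lines[j].split('>')[1].split('<')[0]
    return values

names = ('spaceTemp', 'fanStatus')
idx = load_indexes(SAMPLE.split('\n'), names)
print(fast_decode(SAMPLE, names, idx))   # {'spaceTemp': '72.5', 'fanStatus': 'On'}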
Example #40
0
class PeriodicColumn(Column, EventConsumerMixin):
    def __init__(self):
        self.function = None
        self._calculator = None
        self.__node = None
        self.__node_url = None
        self.__lock = Lock()
        self.__started = 0
        self._sid = None
        self._present_value = None
        Column.__init__(self)
        EventConsumerMixin.__init__(self, self.change_of_value)

    ##
    # @author Craig Warren
    # @param config
    #   @key context Sets the context for the function passed in
    #   @value anything Possible context, e.g. 'import time' if the function
    #                 uses the time module
    #   @default None
    #   @key function Function to return the value to record
    #   @value function
    #   @required
    #   @key args Arguments to the function
    #   @value list a list of arguments required by the function
    #   @default an empty list
    # @return None
    #
    def configure(self, config):
        Column.configure(self, config)
        set_attribute(self, 'context', 'None', config, str)
        set_attribute(self, 'function', REQUIRED, config)
        set_attribute(self, 'use_subscription_manager', 1, config, int)
        ##
        # @fixme HACK to work around too much voodoo to fix right now.
        self.__function_attribute = self.function
        set_attribute(self, 'conversion', as_magnitude, config, _function)
        self.original_function = self.function
        if type(self.function) == types.StringType:
            self.function = string.replace(
                self.function, 'self.',
                'as_internal_node("%s").' % as_node_url(self))
        set_attribute(self, 'args', '()', config)
        # fix for bad configuration
        if self.args == '':
            self.args = '()'
        self.__function_config = self.function
        self._last_time = None
        self._last_value = None
        self._period = self.parent.parent.period

    def start(self):
        Column.start(self)
        if (type(self.__function_config) == types.StringType
                and string.count(self.__function_config, 'as_node') == 1
                and self.__function_config.endswith('get')):
            func = self.__function_config
            self.__node = as_node(func[func.find('(') + 2:func.rfind(')') - 1])
            if self.use_subscription_manager:
                self._sid = SM.create_delivered(self,
                                                {1: as_node_url(self.__node)})
                self.function = self.get_last
            else:
                self.function = getattr(self.__node,
                                        func[func.rfind('.') + 1:])
        rexec = self.parent.parent.get_environment()
        self.original_function = RFunction(self.function,
                                           args=self.args,
                                           context=self.context,
                                           rexec=rexec)
        self.function = self._convert
        self.variables = {}
        nodes = self.children_nodes()
        for potential_calculator in nodes:
            if hasattr(potential_calculator, 'evaluate'):
                if self._calculator:  # more than one calculator child node
                    raise EAttributeError('Too many calculator nodes', self)
                self._calculator = potential_calculator
                self.function = self._evaluate  # hook the calculator in
        self.__original_function = self.original_function
        self.original_function = self.__evaluate_original_function
        self.__started = 1

    def stop(self):
        self.__started = 0
        Column.stop(self)
        self.variables = None
        self._calculator = None
        if self._sid:
            SM.destroy(self._sid)
            self._sid = None

    def __evaluate_original_function(self):
        if not self.__started:
            msglog.log(
                'broadway', msglog.types.WARN, 'Attempting to get value of '
                'unstarted Column.  Will attempt start.')
            try:
                self.start()
            except:
                self.stop()
                raise
            msglog.log('broadway', msglog.types.INFO, 'Column Start succeeded')
        elif self.__node is not None and self.__node.parent is None:
            self.__lock.acquire()
            try:
                # Redoing parent test with Lock acquired.
                #   Prevents unnecessary locking.
                if self.__node.parent is None:
                    msglog.log(
                        'broadway', msglog.types.WARN,
                        'Source node for Column %s has no parent' % self.name)
                    msglog.log('broadway', msglog.types.WARN,
                               'Stopping Column %s' % self.name)
                    self.stop()
                    msglog.log('broadway', msglog.types.WARN,
                               'Restarting Column %s' % self.name)
                    self.start()
            finally:
                self.__lock.release()
        return self.__original_function()

    def attach_variable(self, name):  #defaults for self.get and calculator.get
        if name == 'now':
            return time.time
        elif name == 'value':
            return self._current_value_
        elif name == 'last_value':
            return self._last_value_
        elif name == 'last_time':
            return self._last_time_
        elif name == 'period':
            return self._period_
        else:
            if debug: print 'Bad Attach:  Attempted to attach to "%s".' % name
            return self._bad_attach_

    def _current_value_(self):
        return self._convert()

    def _last_value_(self):
        if debug: print 'Get last value of :', self._last_value
        return self._last_value

    def _last_time_(self):
        if debug: print 'Get last time of :', self._last_time
        return self._last_time

    def _period_(self):
        if debug: print 'Get period : ', self._period
        return self._period

    def _bad_attach_(self):
        raise EAttributeError('attempt to attach to non-existent variable',
                              self)

    ##
    # our hook to replace the function attribute with a calculator
    def _evaluate(self):
        now = self.scheduled_time()
        value = self._current_value_()
        if debug: print 'Now :', now, value
        answer = self._calculator.evaluate({'now': now, 'value': value})
        if debug: print 'Lasts :', now, value
        self._last_time = now
        self._last_value = value
        return answer

    ##
    # Hook for getting scheduled time from a periodic_column.
    # Mostly here for backwards-compatibility, function should
    # be self.parent.scheduled_time.
    #
    # @return Timestamp of current run.
    #
    def scheduled_time(self):
        if self.parent is None:
            return time.time()
        return self.parent.parent.scheduled_time()

    ##
    # @author Craig Warren
    # @return dictionary
    #   the current configuration dictionary
    #
    def configuration(self):
        config = Column.configuration(self)
        get_attribute(self, 'context', config)
        get_attribute(self, 'conversion', config, _name)
        config['function'] = self.__function_attribute
        get_attribute(self, 'args', config)
        return config

    def _convert(self):
        return self.conversion(self.original_function())

    def get(self, skipCache=0):  #async get from nodebrowser
        if self._calculator:
            return self._calculator.get()
        if not callable(self.function):
            raise ENotStarted('Function not callable, usually ' +
                              'means get called before start')
        return self._convert()

    def change_of_value(self, event):
        self._present_value = event.results()[1]['value']

    def get_last(self):
        if isinstance(self._present_value, Exception):
            raise self._present_value
        if not self._present_value:
            self._present_value = self.__node.get()
        return self._present_value

    def get_source_node_url(self):
        # HACK to get the source node URL
        if type(self.function) == types.StringType:
            return self.function.split('"')[1]
        return 'Unknown URL'
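# A hedged example (key names taken from configure() above, values purely
# illustrative) of the configuration documented in the comment block before
# PeriodicColumn.configure().  The 'function' string matches the
# as_node(...).get form that start() special-cases, and 'args' stays '()' when
# the function needs no extra arguments.
example_column_config = {
    'name': 'outside_air_temp',                          # illustrative Column name
    'parent': '/services/logger/periodic_log/columns',   # illustrative path
    'context': 'import time',                            # made available to the expression
    'function': 'as_node("/services/time/local").get',
    'args': '()',
    'use_subscription_manager': 1,
}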
Example #41
0
class PeriodicExporter(Exporter, EventConsumerMixin):
    def __init__(self):
        Exporter.__init__(self)
        EventConsumerMixin.__init__(self, self.handle_connected,
                                    self.connection_event_error)
        self.running = 0
        self._scheduled = None
        self._lock = Lock()

    def handle_connected(self, event):
        self.msglog('%s Got connection event' % self.name)
        if event.__class__ == ConnectionEvent:
            self.msglog('Connection state is %s.' % str(event.state))
            if event.state == ConnectionEvent.STATE.UP:
                self.msglog('Going to start export.')
                self.go()
        else:
            msg = (('Unknown event received by %s from %s.' %
                    (self.name, str(self.connection_node))) +
                   ' Event: %s' % str(event))
            msglog.log('broadway', msglog.types.WARN, msg)

    def connection_event_error(self, exc, event):
        msg = ('Connection Event for ' + str(self.connection_node) +
               ' had the following error\nEvent: ' + str(event) +
               '\nError: ' + str(exc))
        msglog.log('broadway', msglog.types.WARN, msg)

    def msglog(self, msg, force=0):
        if self.debug or force:
            msglog.log('broadway.mpx.service.data.periodic_exporter',
                       msglog.types.DB, msg)

    def configure(self, config):
        map_to_attribute(self, 'period', 900, config, map_to_seconds)
        if self.period == 0:
            raise EInvalidValue('period', self.period,
                                'Export period cannot be 0')
        set_attribute(self, 'debug', 0, config, as_boolean)
        set_attribute(self, 'synchronize_on', '00:00', config)
        set_attribute(self, 'timeout', 60, config, int)
        set_attribute(self, 'connection_node', '/services/network', config)
        set_attribute(self, 'connection_attempts', 3, config, int)
        set_attribute(self, 'always_export', 0, config, as_boolean)
        set_attribute(self, 'breakup_on_period', 0, config, as_boolean)
        Exporter.configure(self, config)
        self._time = _TimeStore(self)

    def configuration(self):
        config = Exporter.configuration(self)
        map_from_attribute(self, 'period', config, map_from_seconds)
        get_attribute(self, 'connection_node', config)
        get_attribute(self, 'debug', config, str)
        get_attribute(self, 'connection_attempts', config)
        get_attribute(self, 'timeout', config)
        get_attribute(self, 'always_export', config, str)
        get_attribute(self, 'synchronize_on', config)
        get_attribute(self, 'breakup_on_period', config, str)
        return config

    def start(self):
        Exporter.start(self)
        if not self.running:
            node = as_node(self.connection_node)
            if hasattr(node, 'event_subscribe'):
                node.event_subscribe(self, ConnectionEvent)
            else:
                if self.debug:
                    msg = ('Connection node: ' + str(self.connection_node) +
                           ' is not an event producer.')
                    msglog.log('broadway', msglog.types.INFO, msg)
            self.connection = node
            self.running = 1
            self._init_next_time()
            self._schedule()
        else:
            raise EAlreadyRunning

    def stop(self):
        self.running = 0
        if self._scheduled is not None:
            try:
                self._scheduled.cancel()
            except:
                pass
        Exporter.stop(self)

    def go(self, end_time, start_time=None):
        if self._lock.locked():
            msglog.log('broadway',msglog.types.WARN, \
                       'Last export still active, skipping current request.')
            return
        Exporter_ThreadPool.queue_noresult(self._complete, end_time,
                                           start_time)

    def scheduled_time(self):
        return self.next_time() - self.period

    def last_time(self):
        return self._time.get_last_time()

    def _schedule(self):
        next = self.next_time()
        self._scheduled = scheduler.at(next, self.go, (next, ))

    def _complete(self, end_time, start_time=None):
        self._lock.acquire()
        try:
            self._export(end_time, start_time)
        except:
            msglog.exception()
        finally:
            self._lock.release()
        if self.running:
            self._schedule()

    ##
    #
    def _init_next_time(self):
        time_format = '%Y%m%d %H:%M:%S'
        sync_format = '%Y%m%d ' + self.synchronize_on + ':00'
        current_time = int(time.time())
        f_sync = time.strftime(sync_format, self.time_function(current_time))
        f_now = time.strftime(time_format, self.time_function(current_time))
        sync = time.mktime(time.strptime(f_sync, time_format))
        now = time.mktime(time.strptime(f_now, time_format))
        if now > sync:
            # sync time in past, add one day to sync time.
            sync += map_to_seconds({'days': 1})
        gap = sync - now
        if self.period > gap:
            sync_time = current_time + gap
        else:
            sync_time = current_time + (gap % self.period)
        #
        #
        #
        self._next_time = sync_time
        return self._next_time

    def next_time(self):
        current_time = time.time()
        while self._next_time < current_time:
            self._next_time += self.period
        return self._next_time

    def data_since_export(self):
        start_time = self.last_time()
        end_time = self.next_time()
        return self.log.get_slice('timestamp', start_time, end_time)

    def formatted_data_since_export_as_string(self):
        length = 0
        stream = self.formatted_data_since_export()
        text = stream.read(1024)
        while len(text) > length:
            length = len(text)
            text += stream.read(1024)
        return text

    def formatted_data_since_export(self):
        return self.formatter.format(self.data_since_export())

    def export_data_since_export(self):
        return self.transporter.transport(self.formatted_data_since_export())

    def _export(self, end_time, start_time=None):
        attempts = 0
        connected = 0
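        # Retry loop: the 'else' clause below only runs if every attempt
        # fails (the loop never hits 'break'), in which case the export is
        # abandoned with EConnectionError.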
        while attempts < self.connection_attempts:
            self.msglog('Acquiring connection %s.' % str(self.connection_node))
            try:
                connected = self.connection.acquire()
            except:
                msglog.exception()
            if connected:
                self.msglog('Connection acquired')
                break
            attempts += 1
            self.msglog('Connection acquire failed %s times.' % attempts)
        else:
            raise EConnectionError('Failed to connect %s times' % attempts)
        try:
            if start_time is None:
                start_time = self.last_time()
            if start_time == 0 and self.breakup_on_period:
                self.msglog('Start Time is 0 and set to Break on Transfer.')
                start_time = self.log.get_first_record()['timestamp']
                self.msglog('Start Time set to timestamp of first row: %s' %
                            time.ctime(start_time))
            retrieve = self.log.get_slice
            if self.log.name == 'msglog':
                msglog.log('msglog.exporter', msglog.types.INFO,
                           'repr(mpx.properties)\n%s\n' % (repr(properties)))
                retrieve = self.log.get_range
                end_time = time.time()
            end = end_time
            if self.breakup_on_period:
                self.msglog('Breaking on period')
                end = start_time + self.period
            self.msglog('Full export of slice from %s to %s' %
                        (time.ctime(start_time), time.ctime(end_time)))
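            # Chunked export loop: with breakup_on_period each pass covers at
            # most one period; a formatter raising EBreakupTransfer shrinks
            # the current chunk, and last_time only advances after a chunk
            # has been transported successfully.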
            while start_time != end_time:
                if end > end_time:
                    self.msglog(
                        'End greater than End Time.  Resetting to End Time')
                    end = end_time
                self.msglog('Going to export slice from %s to %s' %
                            (time.ctime(start_time), time.ctime(end)))
                data = retrieve('timestamp', start_time, end)
                if (not data) and (not self.always_export):
                    raise ENoData('timestamp', start_time, end)
                self.msglog('Sending data to formatter.')
                try:
                    output = self.formatter.format(data)
                    if not output is None:
                        self.msglog('Sending formatted data to transporter.')
                        self.transporter.transport(output)
                    start_time = end
                except EBreakupTransfer, e:
                    entry = e.break_at
                    if entry['timestamp'] == end:
                        # prevents loop where transporter is just failing.
                        raise EIOError('EBreakupTransfer not progressing.')
                    end = entry['timestamp']
                    msglog.log(
                        'broadway', msglog.types.WARN,
                        'Breaking up data transfer at %s.' % time.ctime(end))
                else:
                    self._time.set_last_time(start_time)
                    self.msglog('Data transported')
                    end = start_time + self.period
        finally:
            if hasattr(self.formatter, 'cancel'):
                # prevent multiple copies of data at next successful transport
                self.formatter.cancel()
            if connected:
                self.msglog('Releasing connection.')
                self.connection.release()

    def nodebrowser_handler(self, nb, path, node, node_url):
        html = nb.get_default_view(node, node_url)
        html += '<h4>Commands</h4>\n'
        s = '%s?action=invoke&method=do_export' % self.name
        html += '<a href="%s">Force export via nodebrowser.</a>' % (s, )
        return html

    def do_export(self, end_time=None, start_time=None):
        if end_time is None:
            end_time = time.time()
        self.go(end_time, start_time)
        return 'Export triggered.'
Example #42
0
class NewUser(PersistentDataObject):
    #USERS = _UserDictionary()
    def __init__(self,
                 name,
                 password_file=PASSWD_FILE,
                 group_file=GROUP_FILE,
                 shadow_file=SHADOW_FILE):
        self.__lock = Lock()
        self.__password_file = password_file
        self.__group_file = group_file
        self.__shadow_file = shadow_file
        self.meta = {}
        self.USERS.load()
        if not self.USERS.has_key(self.name()):
            msglog.log('broadway', msglog.types.INFO,
                       ('No profile for user %s found, creating'
                        ' new profile' % name))
            self.USERS[self.name()] = str(UUID())
        PersistentDataObject.__init__(self, self.USERS[self.name()])
        PersistentDataObject.load(self)

    def save(self):
        self.__lock.acquire()
        try:
            passwd_db = PasswdFile(self.__password_file)
            passwd_db.load()
            passwd_db[self.name()] = self.password_entry()
            passwd_db.save()

            # save /etc/shadow content
            shadow_db = ShadowFile(self.__shadow_file)
            shadow_db.load()
            shadow_db[self.name()] = self.shadow_entry()
            shadow_db.save()
        finally:
            self.__lock.release()
        self.load(self.name())

    def name(self):
        return self.__user.user()

    def group(self):
        return self.__groups[0].group()

    def groups(self):
        group_db = GroupFile(self.__group_file)
        group_db.load()
        return self.__user.groups(group_db)

    def group_ids(self):
        ids = []
        for group in self.groups():
            ids.append(group.gid())
        return ids

    def type(self):
        return self.__user.user_type()

    def set_type(self, type):
        raise ENotImplemented(self.set_type)

    def password(self):
        raise ENotImplemented(self.password)

    def set_password(self, password):
        self.__shadow.crypt(crypted_password(self.__name, password))

    def crypt(self):
        return self.__shadow.crypt()

    def set_crypt(self, crypt):
        self.__shadow.crypt(crypt)

    def uid(self):
        return self.__user.uid()

    def set_uid(self, uid):
        self.__user.uid(uid)

    def gid(self):
        return self.__groups[0].gid()

    def set_gid(self, gid):
        self.__user.gid(gid)

    def gids(self):
        gids = []
        for group in self.__groups:
            gids.append(group.gid())
        return gids

    def set_gids(self, gids):
        raise ENotImplemented(self.set_gids)

    def gecos(self):
        return self.__user.gecos()

    def set_gecos(self, gecos):
        self.__user.gecos(gecos)

    def directory(self):
        return self.__user.directory()

    def set_directory(self, directory):
        self.__user.directory(directory)

    def shell(self):
        return self.__user.shell()

    def set_shell(self, shell):
        self.__user.shell(shell)

    def is_dirty(self):
        return not self.__loaded

    def set_meta_value(self, name, value):
        self.meta[name] = value
        PersistentDataObject.save(self)

    def get_meta_value(self, name, default=None):
        if self.meta.has_key(name):
            return self.meta[name]
        return default

    def get_meta(self):
        return self.meta.copy()

    def __getitem__(self, name):
        return self.get_meta_value(name)

    def __setitem__(self, name, value):
        return self.set_meta_value(name, value)

    def password_entry(self):
        return self.__user

    def shadow_entry(self):
        return self.__shadow

    def group_entry(self):
        return self.__groups[0]

    def group_entries(self):
        return self.__groups
Example #43
0
 class SequenceStati(PersistentDataObject):
     def __init__(self, node):
         self.__lock = Lock()
         self.__last_save = {}
         self.max_seq = -1
         self.pending_seqs = []
         self.inprocess_seqs = []
         PersistentDataObject.__init__(self, node, auto_load=True)
         return
     def __snapshot(self, attrs):
         self.__last_save = {}
         for attr in attrs:
             self.__last_save[attr] = copy.copy(getattr(self,attr))
         return
     def __changed(self):
         if not self.__last_save:
             return True
         for attr in self.__last_save.keys():
             if not hasattr(self, attr):
                 return True
             if self.__last_save[attr] != getattr(self,attr):
                 return True
         return False
     def __load(self):
         result = PersistentDataObject.load(self)
         self.__snapshot(self.loaded())
         return result
     ##
     # @note Referrers should not call this method as the state is best
     #       maintained via the sequence processing methods:
     #       QUEUE_PENDING(), SEQUENCE_TO_PROCESS(), SEQUENCE_PROCESSED(),
     #       and SEQUENCES_PROCESSED().
     def load(self):
         self.__lock.acquire()
         try:
             return self.__load()
         finally:
             self.__lock.release()
         raise EUnreachableCode()
     def __save(self):
         result = PersistentDataObject.save(self)
         self.__snapshot(self.saved())
         return result
     ##
     # @note Referrers should not call this method as the state is best
     #       maintained via the sequence processing methods:
     #       QUEUE_PENDING(), SEQUENCE_TO_PROCESS(), SEQUENCE_PROCESSED(),
     #       and SEQUENCES_PROCESSED().
     def save(self):
         self.__lock.acquire()
         try:
             return self.__save()
         finally:
             self.__lock.release()
         raise EUnreachableCode()
     def __too_stale(self):
         return self.__changed() and True
     def __save_if_stale(self, force_save=False):
         if not force_save:
             force_save = self.__too_stale()
         if force_save:
             self.__save()
         return
     def save_if_stale(self):
         self.__lock.acquire()
         try:
             return self.__save_if_stale()
         finally:
             self.__lock.release()
         raise EUnreachableCode()
     ##
     # Add a sequence number to the pending queue.
     def __queue_pending(self, seq):
         if seq not in self.pending_seqs:
             self.pending_seqs.append(seq)
             self.pending_seqs.sort()
             if seq > self.max_seq:
                 self.max_seq = seq
         return
     ##
     # Add a sequence number to the pending queue.
     def queue_pending(self, seq):
         self.__lock.acquire()
         try:
             self.__queue_pending(seq)
         finally:
             self.__lock.release()
         return
     ##
     # Return (pop) the first sequence number from the pending queue,
     # and add it to the in-process queue.
     def sequence_to_process(self):
         self.__lock.acquire()
         try:
             if self.pending_seqs:
                 seq = self.pending_seqs.pop(0)
                 if seq not in self.inprocess_seqs:
                     self.inprocess_seqs.append(seq)
                 return seq
         finally:
             self.__lock.release()
         return None
     ##
     # Sequence was successfully processed (exported).
     def __sequence_processed(self, seq):
         if seq in self.pending_seqs:
             self.pending_seqs.remove(seq)
             pass # @fixme log weirdness.
         if seq not in self.inprocess_seqs:
             pass # @fixme Log weirdness.
         else:
             self.inprocess_seqs.remove(seq)
         return
     ##
     # Sequence was successfully processed (exported).
     # @note This commits changed to the PDO.
     def sequence_processed(self, seq):
         self.__lock.acquire()
         try:
             self.__sequence_processed(seq)
             self.__save_if_stale()
         finally:
             self.__lock.release()
         return
     ##
     # Sequences were successfully processed (exported).
     # @note This commits changed to the PDO.
     def sequences_processed(self, seqs):
         self.__lock.acquire()
         try:
             for seq in seqs:
                 self.__sequence_processed(seq)
             self.__save_if_stale()
         finally:
             self.__lock.release()
         return
     ##
     # Simulate pending sequence numbers for all sequence numbers that do
     # not appear to have been exported.
     # @note Consumers must deal with sequence numbers that they don't have
     #       in their event queue by looking them up in the Log.  In the
     #       case of looking up a sequence, consumers should call
     #       sequence_processed() if the sequence does not exist so this
     #       object knows not to simulate that number again.
     def __recover_pending(self, upto=-1):
         inprocess = self.inprocess_seqs
         self.inprocess_seqs = []
         self.pending_seqs.extend(inprocess)
         self.pending_seqs.sort()
         start_seq = self.max_seq + 1
         if self.pending_seqs:
             start_seq = max(start_seq, self.pending_seqs[-1]+1)
         for seq in range(start_seq, upto+1):
             self.__queue_pending(seq)
         return
     ##
     # Simulate pending sequence numbers for all sequence numbers that do
     # not appear to have been exported.
     def recover_pending(self, upto=-1):
         self.__lock.acquire()
         try:
             self.__recover_pending(upto)
         finally:
             self.__lock.release()
         return
     pass
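
A minimal usage sketch, assuming a pre-existing node object accepted by
PersistentDataObject and an export() stand-in for the real export step (these
names and the sequence numbers are illustrative, not from the original module):

    status = SequenceStati(node)
    status.recover_pending(upto=last_known_seq)  # re-queue anything unexported
    status.queue_pending(new_seq)                # new sequence number from the log
    seq = status.sequence_to_process()           # pop pending -> in-process
    if seq is not None:
        export(seq)                              # hypothetical export call
        status.sequence_processed(seq)           # mark done; saves the PDO if stale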
Example #44
0
class TcpTunnel(ImmortalThread):
    def __init__(self, vcp):
        #super(TcpTunnel, self).__init__(self)
        ImmortalThread.__init__(self)
        self._needs_reconfig = 0
        # link to the port object
        self._vcp = vcp
        self._lock = Lock()
        self._op_lock = Lock()
        # list of operations to apply to an {out|in}bound tcp 
        # segment. In the future this might include operations 
        # such as encryption or compression - for now, only the
        # header info. that is applied by devices such as 
        # Lantronix's UDS-10 is supported.  Up refers to what is
        # being applied to outbound tcp data, down to received.
        # Methods should be added to these lists in the order they
        # are to be applied.
        self._up_segment_ops = []
        self._down_segment_ops = []
        # transaction identifier - used only in vcp mode.
        self.__tid = 0
        self._pending_tid = 0
        # one and only poll object.  socket, serial fd and
        # command pipe are all polled.
        self._poll_obj = None
        # command pipe allows other threads to insert control
        # messages.
        self._cmd_pipe = None
        # both sides (serial & socket) of the tunnel
        self._sock_listen_fd = None
        self._sock_fd = None
        self._serial_port = None
        # tcp state management
        self._is_connected = 0
        self.is_active = 0
        # tunnel statistics
        self.tcp_bytes_rcvd = 0
        self.tcp_bytes_sent = 0
        self.serial_bytes_rcvd = 0
        self.serial_bytes_sent = 0
        self.connection_attempts = 0
            
    def configure(self, config):
        self.tty = '/dev/tty' + config['dev'][-2:]
        self.tcp_port = int(config['tcp_port'])
        self.mode = config['mode']
        self.timeout_msec = config['p_timeout_msec']

        if self.mode == 'vcp':
            if self._up_segment_ops.count(self._add_vcp_header) == 0:
                self._up_segment_ops.append(self._add_vcp_header)
            if self._down_segment_ops.count(self._remove_vcp_header) == 0:
                self._down_segment_ops.append(self._remove_vcp_header)
        self.is_server = int(config['is_server'])
        if self.is_server == 0:
            self.host = config['host']  # who we're connecting to.
        if self.is_active:
            # tunnel is being reconfigured "in flight".
            self._needs_reconfig = 1
            self._send_cmd('reconfig')
            if self._is_in_accept():
                self._clear_accept()
                    
    def run(self):
        self._needs_reconfig = 0
        if self.is_active:
            # we've restarted due to a configuration change
            self.start_tunnel()
        else:
            if not self._cmd_pipe:
                # set up the command pipe and begin to build the poll obj.
                self._cmd_pipe = os.pipe()
                self._poll_obj = select.poll()
                self._poll_obj.register(self._cmd_pipe[READ],
                    select.POLLIN | select.POLLERR | select.POLLHUP)
        while 1:
            # no poll timeout, wait here until kicked to start.
            evt = self._poll_obj.poll(-1)
            if evt[0][0] == self._cmd_pipe[READ]:
                if evt[0][1] == select.POLLIN:
                    cmd = os.read(self._cmd_pipe[READ], 32)
                    if cmd.find('start') >= 0:
                        self.start_tunnel()
            # cmd pipe poll err, critical, hard restart
            self._cmd_pipe = None
            self._poll_obj = None
            raise ERestart()
                        
    def start_tunnel(self):
        if self is not currentThread():
            self._send_cmd('start')
            return
        self._op_lock.acquire()
        try:
            self.is_active = 1
            # set up Port object for the tunnel that reads\writes to the 
            # slave device file of the pseudo-terminal pair. 
            if not self._serial_port:
                self._serial_port = Port()
            cfg = self._vcp.configuration()
            cfg['dev'] = self.tty
            cfg['name'] = '_slave'
            cfg['parent'] = self._vcp
            self._serial_port.configure(cfg)
            self._op_lock.release()
        except:
            self._op_lock.release()
        while self.is_active:
            if not self._serial_port.is_open():
                self._serial_port.open()
                self._serial_port.drain()
            try:
                if self.is_server:
                    self._do_listen()
                else:
                    self._do_connect()
            except:
                msglog.exception()
                if self._serial_port and not self._serial_port.is_open():
                    self._serial_port.close()

    def stop_tunnel(self):
        self.is_active = 0
        if self is not currentThread():
            self._send_cmd('stop')
            if self._is_in_accept():
                self._clear_accept()
        else:
            self._op_lock.acquire()
            try:
                self._tear_down_fds()
            finally:
                self._op_lock.release()
            #raise ERestart()
        
    def is_connected(self):
        self._op_lock.acquire()
        result = self._is_connected
        self._op_lock.release()
        return result
                                
    def _create_socket(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s
            
    def _close_socket(self):
        self._is_connected = 0
        self._sock_fd.close()
            
    def _send_cmd(self, cmd):
        self._lock.acquire()
        try:
            os.write(self._cmd_pipe[WRITE], cmd)
        finally:
            self._lock.release()
            
    def _do_connect(self):
        while self.is_active:
            self._sock_fd = self._create_socket()
            while not self._is_connected:
                self.connection_attempts += 1
                try:
                    self._sock_fd.connect((self.host, self.tcp_port))
                    self._is_connected = 1
                except socket.gaierror, e:
                    # host related error, ie. hostname not resolving - possibly transient
                    self.connection_attempts += 1
                    msglog.log('VCP', WARN, 'Error resolving hostname %s.' % self.host)
                    time.sleep(60)
                    raise EConnectionError
                except socket.error, e:
                    # connection error, possibly transient - sleep for a bit and retry
                    self.connection_attempts += 1
                    time.sleep(30) 
            if self._needs_reconfig:
                self._is_connected = 0
                self._tear_down_fds()
                raise ERestart()
            # loop in _do_tunnel until the tcp connection or the framework 
            # based consumer (ie. protocol) "goes away".
            self._do_tunnel()
            self._is_connected = 0
Example #45
0
class _ServerTSM:
    IDLE = 'idle'  # ASHRAE 135-1995 5.4.5.1
    SEGMENTED_REQUEST = 'segmented request'  # ASHRAE 135-1995 5.4.5.2
    AWAIT_RESPONSE = 'await response'  # ASHRAE 135-1995 5.4.5.3
    SEGMENTED_RESPONSE = 'segmented response'  # ASHRAE 135-1995 5.4.5.4
    COMPLETE = 'complete'  # IDLE with a result.

    def __init__(self, device, interface, network, source, apdu):
        global _module_lock
        global _tsm_q
        self._cv = Lock()
        self.timestamp = now()
        #self.network = network
        self.device = device
        self.address = device.address
        self.request = apdu
        self.network = network
        self.source = source
        self.exception = None
        self.state = self.IDLE
        # maximum npdu length is from three sources, the interface, the device
        # and the request.  NPDU header is 21 bytes max
        # max_apdu_len is the number of bytes, max_apdu_length_accepted is an encoded value
        mtu = min(interface.mtu - _network._MAX_APDU_OVERHEAD,
                  device.max_apdu_len)
        #read max apdu size from request and make sure we pick the smallest
        # length is encoded according to bacnet 20.1.2.5
        # this issue also shows up in lib.c bacnet_send_message
        if apdu.max_apdu_length_accepted < len(DECODE_MAX_APDU_LENGTH):
            mtu = min(mtu,
                      DECODE_MAX_APDU_LENGTH[apdu.max_apdu_length_accepted])
        self.send_segment_size = mtu
        # use either interface or device timeout/retry values
        self.T_seg = interface.T_seg
        if device.T_seg is not None:
            self.T_seg = device.T_seg
        self.T_wait_for_seg = interface.T_wait_for_seg
        if device.T_wait_for_seg is not None:
            self.T_wait_for_seg = device.T_wait_for_seg
        self.T_out = interface.T_out
        if device.T_out is not None:
            self.T_out = device.T_out
        self.N_retry = interface.N_retry
        if device.N_retry is not None:
            self.N_retry = device.N_retry

        self.retry_count = 0
        self.segment_retry_count = 0
        self.sent_all_segments = 0
        self.last_sequence_number = 0
        self.initial_sequence_number = 0
        self.actual_window_size = None
        self.proposed_window_size = None
        self.segment_timer = None
        self.response = None
        self.invoke_id = apdu.invoke_id

        return

    def process_state(self, msg):
        try:
            self._cv.acquire()
            if self.state == self.IDLE:
                self._idle_state(msg)
            elif self.state == self.SEGMENTED_REQUEST:
                self._segmented_request_state(msg)
            elif self.state == self.SEGMENTED_RESPONSE:
                self._segmented_response_state(msg)
            elif self.state == self.COMPLETE:
                self._completed_state(msg)
            else:
                raise EIOError('Illegal TSM state')
        finally:
            self._cv.release()

    def _idle_state(self, msg):
        if not msg:
            self._stop_segment_timer()
            return  #no timers while idle
        # @fixme Validate APDU...
        if msg.pdu_type == BACNET_CONFIRMED_SERVICE_REQUEST_PDU:
            if msg.segmented_message == 1:
                if msg.sequence_number == 0:
                    self.request = APDU(msg)
                    self.actual_window_size = msg.window_size
                    self._send_segment_ack(msg)
                    self.segment_retry_count = 0
                    self._start_segment_timer()
                    self.last_sequence_number = 0
                    self.initial_sequence_number = 0
                    self.state = self.SEGMENTED_REQUEST
                    return
                else:  #bad sequence number
                    self._UnexpectedPDU_Received(msg)
                    return
            else:  #unsegmented, get response now
                self._send_response(
                    confirmed_service_indication(self.network, self.device,
                                                 msg))
                return
        elif msg.pdu_type == BACNET_ABORT_PDU:
            self._complete()
            return
        elif msg.pdu_type == BACNET_SEGMENT_ACK_PDU and msg.server == 0:
            self._UnexpectedPDU_Received(msg)
        else:
            print 'unexpected packet ignored in bacnet server idle state'
            pass  #silence

    def _segmented_request_state(self, msg):
        if msg:
            if msg.pdu_type == BACNET_CONFIRMED_SERVICE_REQUEST_PDU:
                if msg.segmented_message == 1:
                    if msg.sequence_number == int((self.last_sequence_number +
                                                   1) & 0xff):
                        if msg.more_follows == 1:  #still more coming in
                            self.request.data.fromstring(
                                msg.data)  #append data to buffer
                            self.last_sequence_number = int(
                                (self.last_sequence_number + 1) & 0xff)
                            if msg.sequence_number == int(
                                (self.initial_sequence_number +
                                 self.actual_window_size) & 0xff):
                                self.initial_sequence_number = self.last_sequence_number
                                self._send_segment_ack(msg)
                            self.segment_retry_count = 0
                            self._start_segment_timer()
                            return
                        else:  #final segment has been received
                            self.request.data.fromstring(
                                msg.data)  #append data to buffer
                            self.last_sequence_number = int(
                                (self.last_sequence_number + 1) & 0xff)
                            self._stop_segment_timer()
                            self._send_segment_ack(msg)
                            self.initial_sequence_number = self.last_sequence_number
                            self._send_response(
                                confirmed_service_indication(
                                    self.network, self.device, self.request))
                            return
                    else:  #segment received out of order
                        self._send_segment_nack(msg.invoke_id,
                                                self.last_sequence_number)
                        self.segment_retry_count = 0
                        self._start_segment_timer()
                        self.initial_sequence_number = self.last_sequence_number
                        return
            elif msg.pdu_type == BACNET_ABORT_PDU:
                self._complete()
                return
            self._UnexpectedPDU_Received(msg)
        else:  #since clock tick
            t = self.segment_timer
            if t:
                if t.executing() or t.expired():
                    if self.segment_retry_count < 3:
                        #lock?
                        self.segment_retry_count += 1
                        self._start_segment_timer()
                    else:
                        self._complete(None)

    def _segmented_response_state(self, msg):
        if debug:
            print 'Server TSM _segmented_response_state'
        if msg:
            if msg.pdu_type == BACNET_SEGMENT_ACK_PDU:
                if not self._InWindow(msg):  #duplicate ack
                    if debug:
                        print 'ServerTSM: segmented_response_state: duplicate ack'
                    self._start_segment_timer()
                    return  #do nothing self._DuplicateACK_Received(msg)
                else:
                    if self.sent_all_segments:  #final ack received
                        if debug: print '_ServerTSM: final ack received'
                        self._stop_segment_timer()
                        self._complete(msg)
                        return
                    else:
                        self._start_segment_timer()
                        self.segment_retry_count = 0
                        return self._NewACK_Received(msg)
            elif msg.pdu_type == BACNET_ABORT_PDU:
                self._complete(msg)
            elif ((msg.pdu_type == BACNET_SIMPLE_ACK_PDU) and \
                 (self.sent_all_segments == 1)):
                self._complete(msg)
            elif msg.pdu_type == BACNET_CONFIRMED_SERVICE_REQUEST_PDU:  #duplicate
                if msg.segmented_message == 1:
                    self._send_segment_nack(self.request.invoke_id,
                                            self.last_sequence_number)
            else:  #anything else is unexpected
                self._UnexpectedPDU_Received(msg)
        else:  #since tick
            t = self.segment_timer
            if t:
                if t.executing() or t.expired():
                    if self.segment_retry_count <= self.N_retry:
                        #lock?
                        self.segment_retry_count += 1
                        self._start_segment_timer()
                        self._FillWindow()
                    else:
                        self._complete(None)
        pass

    def _NewACK_Received(self, msg):
        if debug: print '_ServerTSM._NewACK_Received'
        self.initial_sequence_number = (msg.sequence_number + 1) % 0x100
        self.actual_window_size = msg.window_size
        self._FillWindow()

    ##
    # @return True if the BACnet-SegmentACK's sequence-number is in the
    #         current transmission window.  Otherwise, false.
    def _InWindow(self, msg):
        window_index = (msg.sequence_number - self.initial_sequence_number) \
                       % 0x100
        return window_index < self.actual_window_size
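    # Worked example (illustrative numbers): with initial_sequence_number 250
    # and actual_window_size 4, ACKs for sequence numbers 250..253 map to
    # window_index 0..3 and are in-window; an ACK for 254, or a stale 249
    # (which wraps to index 255), falls outside and is treated as a duplicate.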

    def _FillWindow(self):
        if debug:
            print 'ServerTSM: enter _FillWindow', self.initial_sequence_number, self.actual_window_size
        for ix in range(self.initial_sequence_number,
                        self.initial_sequence_number +
                        self.actual_window_size):
            last_seg = len(self.response.data) <= (self.send_segment_size *
                                                   (ix + 1))
            # Send the next segment.
            segment = APDU()
            segment.version = 1
            segment.data_expecting_reply = 1
            segment.pdu_type = BACNET_COMPLEX_ACK_PDU
            segment.segmented_message = 1
            segment.more_follows = not last_seg
            segment.invoke_id = self.request.invoke_id
            segment.sequence_number = (ix % 256)
            segment.window_size = self.proposed_window_size
            segment.choice = self.request.choice
            segment.data = self.response.data[self.send_segment_size *
                                              ix:self.send_segment_size *
                                              (ix + 1)]
            if debug:
                print 'ServerTSM: _FillWindow: ', len(
                    segment.data), last_seg, ix
            segment = segment.as_npdu()
            self._send(segment)
            if last_seg:
                self.sent_all_segments = 1
                return

    def _send_response(self, apdu):
        self.response = apdu
        if debug: print 'ServerTSM:_send_response: ', str(len(apdu.data))
        if is_APDU(apdu):
            if len(apdu.data) <= self.send_segment_size:
                self.send_UnsegmentedComplexOrSimpleAck(apdu.as_npdu())
            else:
                self.send_SegmentedComplexAck(apdu)
        else:
            if len(apdu.data) <= self.send_segment_size:
                self.send_UnsegmentedComplexOrSimpleAck(apdu)
            else:
                self.send_SegmentedComplexAck(apdu)
        return

    def _send(self, response):
        if not _is_master_server(self.device, self.network):
            response.sspec = 1
            response.slen = 6  #what should we choose?
            response.sadr = utils.bytes_as_string_of_hex_values(
                self.device.instance_number, response.slen)
            response.snet = self.device.network  #correct local copy?
        if (hasattr(self.request, 'sspec')) and (
                self.request.sspec
                == 1):  #message came through router, send it back
            response.dspec = 1
            response.dlen = self.request.slen
            response.dadr = self.request.sadr
            response.dnet = self.request.snet
        _send(self.network, self.source, response)

    def _send_segment_ack(self, msg):
        if debug: print '_ServerTSM._send_segment_ack'
        # Acknowledge the received segment.
        ack = NPDU()
        ack.version = 1
        ack.data_expecting_reply = 0
        ack.pdu_type = BACNET_SEGMENT_ACK_PDU
        ack.negative_ack = 0
        ack.server = 1
        ack.window_size = self.actual_window_size
        ack.invoke_id = msg.invoke_id
        ack.sequence_number = msg.sequence_number
        self._send(ack)

    def _send_segment_nack(self, invoke_id, sequence_number):
        if debug: print '_ServerTSM._send_segment_nack'
        # Negatively acknowledge an out-of-order or duplicate segment.
        ack = NPDU()
        ack.version = 1
        ack.data_expecting_reply = 0
        ack.pdu_type = BACNET_SEGMENT_ACK_PDU
        ack.negative_ack = 1
        ack.server = 1
        ack.window_size = self.actual_window_size
        ack.invoke_id = invoke_id
        ack.sequence_number = sequence_number
        self._send(ack)

    ##
    # BACnet event handler for send_UnsegmentedComplexAck.
    def send_UnsegmentedComplexOrSimpleAck(self, msg):
        if debug: print '_ServerTSM.send_ConfirmedUnsegmented'
        #if self.state != self.IDLE:
        ## @fixme exceptions...
        #raise EIOError('Transaction State Machine in use')
        msg.data_expecting_reply = 0
        msg.server = 1
        self._send(msg)
        self._complete(msg)

    ##
    # BACnet event handler for send_SegmentedComplexAck.
    def send_SegmentedComplexAck(self, msg):
        if debug: print '_ServerTSM.send_SegmentedComplexAck', len(msg.data)
        #if self.state != self.IDLE:
        ## @fixme exceptions...
        #raise EIOError('Transaction State Machine in use')
        self.segment_retry_count = 0
        self.initial_sequence_number = 0
        self.proposed_window_size = 10
        self.actual_window_size = 1
        self._start_segment_timer()
        self.sent_all_segments = 0
        # Send the first segment.
        segment = APDU()
        segment.version = 1
        segment.data_expecting_reply = 1
        segment.server = 1
        segment.pdu_type = BACNET_COMPLEX_ACK_PDU
        segment.segmented_message = 1
        segment.more_follows = 1
        segment.segmented_response_accepted = 1
        segment.invoke_id = msg.invoke_id
        segment.sequence_number = 0
        segment.window_size = self.proposed_window_size
        segment.choice = msg.choice
        segment.data = msg.data[0:self.send_segment_size]
        segment = segment.as_npdu()
        self.state = self.SEGMENTED_RESPONSE
        self._send(segment)

    def _completed_state(self, msg):
        pass

    #todo have all TSM exit through this state to clean up timers
    def _complete(self, msg):
        if debug: print 'SM _complete'
        self._stop_segment_timer()
        self.response = msg
        if not msg:
            self.exception = ETimeout
        self.state = self.COMPLETE

    def complete(self):
        return self.state == self.COMPLETE

    def _UnexpectedPDU_Received(self, msg):
        if debug: print '_ServerTSM._UnexpectedPDU_Received'
        # Send an ABORT.
        self._stop_segment_timer()
        abort = npdu.NPDU()
        abort.version = 1
        abort.data_expecting_reply = 0
        abort.pdu_type = BACNET_ABORT_PDU
        abort.server = 1
        abort.invoke_id = msg.invoke_id
        abort.reason = ABORT_REASON_INVALID_APDU_IN_THIS_STATE
        self._send(abort)
        # Give up.
        self.exception = EIOError('Unexpected PDU.')  # @fixme
        self._complete(None)
        return

    def _start_segment_timer(self, timeout=None):
        self._stop_segment_timer()
        if timeout is None:
            timeout = self.T_seg
        self.segment_timer = scheduler.seconds_from_now_do(
            timeout, self._segment_timeout)

    def _stop_segment_timer(self):
        if self.segment_timer:
            self.segment_timer.cancel()
            self.segment_timer = None

    def _segment_timeout(self):
        if debug: print '_ServerTSM: timeout'
        self.process_state(None)
Example #46
0
class EnergywiseDomain(Node):
    def __init__(self):
        self._cpex_lock=RLock() # cpex switch list lock, used for setting/getting the "primary" cpex switch.
        self._cache_lock=Lock() # domain value cache lock.
        self._cpex_switch_map_lock = Lock() # lock for cpex switches cache data structure
        self._cache_value=None
        self._cache_time=0
        self._cpex_switch_map_cache=SwitchMap({})
        self._cpex_switch_map_time=0
        self.ttl=30
        self._reinit()
        return
    def _not_running(self, *args, **kw):
        raise ENotRunning("%r is not running." % self.as_node_url())
    def _empty_domain_usage(self, importance=100, skipCache=False):
        return 0.0
    def _reinit(self):
        self._cpex_switches = []
        self._snmp_switches = []
        self._all_switches = []
        self._all_domains = []
        self.domain = ''
        self.trend_node = None
        self.energywise_domain_usage = self._not_running
        return
    def configure(self, config):
        Node.configure(self, config)
        set_attribute(self, 'ttl', 30, config, int)
        return
    def configuration(self):
        config = Node.configuration(self)
        get_attribute(self, 'ttl', config, str)
        return config
    def start(self):
        self._cpex_lock.acquire()
        try:
            for child in self.children_nodes():
                if isinstance(child,EnergywiseSwitch):
                    if child.PROTOCOL_SNMP == child.protocol:
                        self._snmp_switches.append(child)
                    else:
                        self._cpex_switches.append(child)
                    self._all_switches.append(child)
                elif isinstance(child,EnergywiseDomain):
                    self._all_domains.append(child)
            # elif @fixme magic hook for reverse compatibility.
            if self._snmp_switches and self._cpex_switches:
                raise EConfiguration(
                    "All switches in a domain must be configurtion to use the"
                    " same protocol."
                    )
            self._cpex_switches.sort(_cpex_switch_cmp) 
        finally:
            self._cpex_lock.release()
        if not self.domain:
            self.domain = _find_domain(self)
        if self._snmp_switches:
            self.energywise_domain_usage = self.snmp_domain_usage
        elif self._cpex_switches:
            self.energywise_domain_usage = self.cpex_domain_usage
        else:
            self.energywise_domain_usage = self._empty_domain_usage
        Node.start(self)
        return
    def stop(self):
        self._reinit()
        Node.stop(self)
        return
    def cpex_domain_usage(self, importance=100, skipCache=False, max_attempts=3):
        self._cpex_lock.acquire()
        aggr_result = 0
        try:
            if not self._cpex_switches:
                raise ENoDomainSwitches(
                    "No switches are configured for the %r domain." % self.domain
                    )
            for switch in self._cpex_switches:
                for i in range(0, max_attempts):
                    try:
                        result = switch.cpex_domain_usage(importance)
                        if result:
                            aggr_result += result
                            break
                    except:
                        pass
        except ENoDomainSwitches:
            raise
        finally:
            self._cpex_lock.release()
        return aggr_result
    # caching for cpex switches under a domain
    # @return A dictionary of Energywise usage values keyed by switch address, possibly from cache.
    def cpex_switch_usage_map(self, importance=100, skipCache=False):
        self._cpex_switch_map_lock.acquire()
        try:
            if skipCache or self._is_cpex_switch_map_stale(time.time()):
                #fetch actual value
                usage_map = self._cpex_switch_usage_map(importance)
                self._update_cpex_switch_map(usage_map, time.time())
            return self._cpex_switch_map_cache
        finally:
            self._cpex_switch_map_lock.release()
        raise EUnreachableCode("Executed unreachable code!")
    ##
    # Call cpex_switch_usage_map on the primary cpex switch.
    # @return A dictionary of Energywise usage values keyed by switch address.
    def _cpex_switch_usage_map(self, importance=100, max_attempts=3):
        self._cpex_lock.acquire()
        aggr_result = {}
        try:
            if not self._cpex_switches:
                raise ENoDomainSwitches(
                    "No switches are configured for the %r domain." % self.domain
                    )
            for switch in self._cpex_switches:
                for i in range(0, max_attempts):
                    try:
                        result = switch.cpex_switch_usage_map(importance)
                        if result:
                            aggr_result.update(result)
                            break
                    except:
                        pass
        except ENoDomainSwitches:
            raise
        finally:
            self._cpex_lock.release()
        return aggr_result
    def snmp_domain_usage(self, importance=100, skipCache=False):
        result = 0
        for switch in self._snmp_switches:
            try:
                result += switch.snmp_switch_usage(importance, skipCache)
            except:
                msglog.exception()
                msglog.log("Energywise",msglog.types.ERR,
                           "Failed to get data from %r switch" %switch.name
                           )
        return result
    def _is_cpex_switch_map_stale(self, timestamp):
        return (self._cpex_switch_map_time + self.ttl) < timestamp
    def _update_cpex_switch_map(self, switch_map, timestamp):
        self._cpex_switch_map_cache = SwitchMap(switch_map)
        self._cpex_switch_map_time = timestamp
    def _is_domain_cache_stale(self,timestamp):
        assert self._cache_lock.locked()
        return (self._cache_time + self.ttl) < timestamp
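    # e.g. with ttl=30 (illustrative), a value cached at t=100 stays fresh for
    # requests up to t=130 and is refetched after that.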
    def _update_domain_cache(self,value, timestamp):
        self._cache_value = value
        self._cache_time = timestamp
    #caching the domain-usage
    def aggregate_domain_usage(self, importance=100, skipCache=False):
        self._cache_lock.acquire()
        try:
            if skipCache or self._is_domain_cache_stale(time.time()):
                # fetch the actual value and update the cache
                try:
                    result = self.energywise_domain_usage(importance, skipCache)
                except:
                    msglog.exception()
                    result = 0
                for sub in self._all_domains:
                    result += sub.aggregate_domain_usage(importance, skipCache)
                self._update_domain_cache(result, time.time())
                return self._cache_value
            else:
                # use the cached value
                return self._cache_value
        finally:
            self._cache_lock.release()
        raise EUnreachableCode("Executed unreachable code!") 
    def new_trend(self,period):
        return new_trend(self,period)
    def delete_trend(self):
        return delete_trend(self)
    def get(self, skipCache=False):
        return self.aggregate_domain_usage()
    def get_result(self, skipCache=False):
        return Result(self.get(skipCache), time.time(), cached=False)
Example #47
0
class EWebConnectAlarmClient(Client):
    _batch_mode_default = 0
    _host_default = REQUIRED
    _port_default = 4546
    _timeout_default = 60
    def _init_default_attribute_values(self):
        self.batch_mode = self._batch_mode_default
        self.host = self._host_default
        self.port = self._port_default
        self.timeout = self._timeout_default
        return
    def __init__(self):
        self.__alarm_queue = Queue()
        self.__current_thread = None
        self.__lock = Lock()
        self._init_default_attribute_values()
        Client.__init__(self)
        return
    def configure(self,config):
        Client.configure(self,config)
        set_attribute(self,'batch_mode', self._batch_mode_default, config, int)
        set_attribute(self,'host', self._host_default, config, str)
        set_attribute(self,'port', self._port_default, config, int)
        set_attribute(self,'timeout', self._timeout_default, config, int)
        return
    def configuration(self):
        config = Client.configuration(self)
        get_attribute(self, 'batch_mode', config, int)
        get_attribute(self, 'host', config, str)
        get_attribute(self, 'port', config, int)
        get_attribute(self, 'timeout', config, int)
        return config
    def start(self):
        self.__lock.acquire()
        try:
            self.__running = 1
            self.register_event(NewAlarmsEvent,self._new_alarms)
        finally:
            self.__lock.release()
        Client.start(self)
        self.debug = 1
    def stop(self):
        self.__lock.acquire()
        try:
            self.unregister_event(NewAlarmsEvent)
            self.__running = 0
        finally:
            self.__lock.release()
        Client.stop(self)
    def is_running(self):
        return self.__running
    def message_log(self,message,message_type=msglog.types.DB):
        if message_type != msglog.types.DB or self.debug:
            msglog.log('EWebConnect Alarm Client',message_type,message)
    ##
    # Event handler for the NewAlarmsEvent
    #
    # Queues each new alarm for processing and then schedules
    # _prime_process_alarm_queue() on a thread pool to ensure that alarms
    # are processed.
    def _new_alarms(self, event):
        self.__lock.acquire()
        try:
            if not self.is_running():
                raise ENotStarted('%s' % self.as_node_url())
        finally:
            self.__lock.release()
        for alarm in event:
            self.__alarm_queue.put(alarm.as_dictionary())
        self.message_log('New Alarms Event, queuing action')
        LOW.queue_noresult(self._prime_process_alarm_queue)
        return
    ##
    # If no thread is actively processing the alarm queue, then set this thread
    # as the current alarm queue processor and invoke _process_alarm_queue().
    #
    # @note This method is invoked as an action queued on a thread pool and
    #       should never be called directly when processing an event.
    def _prime_process_alarm_queue(self):
        # @todo Save queue in a PDO?
        thread = currentThread()
        self.__current_thread = thread
        self._process_alarm_queue(thread)
    ##
    # Process all alarms on the alarm queue.
    #
    # @note This method is invoked indirectly as an action queued on a
    #       thread pool and should never be called directly when processing an
    #       event.
    def _process_alarm_queue(self,my_thread):
        self.message_log('Processing Alarm Queue...')
        while my_thread == self.__current_thread:
            alarm_dict = self.__alarm_queue.get(0)
            if alarm_dict is NOTHING:
                break
            try:
                self._send_alarm_dict(alarm_dict)
            except:
                self.message_log('Failed to send alarm:\n  %r' % alarm_dict,
                                 msglog.types.ERR)
                msglog.exception()
        else:
            self.message_log('New alarm process coincided with running process')
        self.message_log('Finished Processing Alarm Queue')
    ##
    # Format the Alarm described by alarm_dict as an eWebConnect Alarm
    # message and send it to the eWebConnect server.
    # @note This method always succeeds in formatting a message; any field that
    #       is not valid for any reason is set to "N/A".   Furthermore,
    #       this method does not intercept any networking failures as it
    #       is the caller's responsibility to handle retries, etc...
    def _send_alarm_dict(self, alarm_dict):
        if self.host is None:
            self.message_log('Failed to send alarm; host address is None:\n  %r' % alarm_dict,
                                 msglog.types.INFO)
            return # nowhere to send it!
        ewebconnect_text = (
            "%(timestamp)s, %(TZ)s, %(host)s, %(what)s, %(code)s:"
            " %(type)s %(text)s"
            ) % {
            "timestamp":self._alarm_timestamp(alarm_dict),
            "TZ":self._alarm_tz(alarm_dict),
            "host":self._alarm_host(alarm_dict),
            "what":self._alarm_what(alarm_dict),
            "code":self._alarm_code(alarm_dict),
            "type":self._alarm_type(alarm_dict),
            "text":self._alarm_text(alarm_dict),
            }
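        # Illustrative rendering (made-up values):
        #   "04/21/2011 09:15:02, EST, mediator-1234, ALARM, high: overtemp
        #    Boiler temperature above limit"
        # Fields other than 'what' fall back to "N/A" when they cannot be
        # derived from alarm_dict.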
        self.message_log('Sending Alarm: %s' % ewebconnect_text)
        server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        try:
            # @fixme Try block only exists because the normal destructor
            #        invocation does not appear to work on mpx.lib.socket
            #        sockets which is a big deal if there is an exception.
            #        - mevans
            server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
            server_socket.connect((self.host, self.port), self.timeout)
            server_socket.sendall(ewebconnect_text, self.timeout)
        finally:
            # A finally block is used because the normal destructor invocation
            # does not appear to work on mpx.lib.socket socket's. - mevans
            # @fixme Figure out why!
            server_socket.close() # Could this hang?  Should I use shutdown?
        self.message_log('Alarm Sent')
        return
    ##
    # Convert an Alarm's time-stamp to a string in eWebConnect's format.
    #
    # @param alarm_dict The dictionary representation of an Alarm, presumably
    #                   returned by the Alarm's as_dictionary() method.
    # @return A string representing the time-stamp in eWebConnect's format.
    # @note Any failure during the conversion results in the string "N/A" and
    #       the exception is logged to the msglog.  This is to help ensure that
    #       the alarm is still delivered with as much useful information as
    #       possible.
    def _alarm_timestamp(self, alarm_dict):
        result = "N/A"
        try:
            localtime = time.localtime(alarm_dict['timestamp'])
            result = time.strftime("%m/%d/%Y %H:%M:%S", localtime)
        except:
            msglog.exception()
        return result
    ##
    # @see _alarm_timestamp().
    def _alarm_tz(self, alarm_dict):
        result = "N/A"
        try:
            is_dst = time.localtime(alarm_dict['timestamp']).tm_isdst
            result = time.tzname[is_dst]
        except:
            msglog.exception()
        return result
    ##
    # @see _alarm_timestamp().
    def _alarm_host(self, alarm_dict):
        result = "N/A"
        try:
            # @fixme F/W should have this as a 'system attribute' (aka
            #        property) and should support COV on properties.
            result = socket.gethostbyaddr(socket.gethostname())[0]
        except:
            msglog.exception()
        return result
    ##
    # @see _alarm_timestamp().
    def _alarm_code(self, alarm_dict):
        result = "N/A"
        try:
            result = alarm_dict['state']
        except:
            msglog.exception()
        return result
    ##
    # @see _alarm_timestamp().
    def _alarm_what(self, alarm_dict):
        result = "ALARM"
        return result
    ##
    # @see _alarm_timestamp().
    def _alarm_type(self, alarm_dict):
        result = "N/A"
        try:
            result = alarm_dict['type']
        except:
            msglog.exception()
        return result
    ##
    # @see _alarm_timestamp().
    def _alarm_text(self, alarm_dict):
        result = "N/A"
        try:
            result = alarm_dict['data']
        except:
            msglog.exception()
        return result
Example #48
0
class SNMP(CompositeNode):
    __node_id__ = '9e2a4bbf-d4cd-40c6-80ba-95edb94214ed'

    def __init__(self):
        super(SNMP, self).__init__()
        self.__lock = Lock()
        self.__mib_builder = None
        self.__mib_view = None
        self.__loaded_mibs = []
        snmpEngine = engine.SnmpEngine()
        transportDispatcher = dispatch.AsynsockDispatcher()
        transportDispatcher.setSocketMap({})
        snmpEngine.registerTransportDispatcher(transportDispatcher)
        self.__dispatcher = snmpEngine.transportDispatcher.runDispatcher
        self.__generator = cmdgen.AsynCommandGenerator(snmpEngine)
        self.__engine = snmpEngine
        return

    def start(self):
        self.__lock.acquire()
        try:
            self.__mib_builder = builder.MibBuilder()
            self.__mib_view = view.MibViewController(self.__mib_builder)
        finally:
            self.__lock.release()
        # Load some common MIBs:
        self.load_mib('SNMP-COMMUNITY-MIB')
        self.load_mib('SNMP-VIEW-BASED-ACM-MIB')
        self.load_mib('IF-MIB')
        super(SNMP, self).start()
        return

    def load_mib(self, mib_name, force=False):
        self.__lock.acquire()
        try:
            if force or (mib_name not in self.__loaded_mibs):
                self.__mib_builder.loadModules(mib_name)
                self.__loaded_mibs.append(mib_name)
        finally:
            self.__lock.release()
        return

    def describe_oid(self, oid):
        self.__lock.acquire()
        try:
            assert isinstance(oid, (tuple, univ.ObjectIdentifier))
            matched_oid, matched_label, suffix = (
                self.__mib_view.getNodeNameByOid(oid))
            mib_name, oid_name, empty = (
                self.__mib_view.getNodeLocation(matched_oid))
            assert matched_label[-1] == oid_name
            assert empty == ()
            matched_smi = self.__mib_builder.mibSymbols[mib_name][oid_name]
            return OidInfo(self.describe_oid, self.__mib_builder,
                           self.__mib_view, oid, matched_oid, matched_label,
                           suffix, mib_name, oid_name, matched_smi)
        finally:
            self.__lock.release()
        raise EUnreachableCode()

    def simple_varbinds_result(self, cbCtx):
        errorIndication = cbCtx['errorIndication']
        errorStatus = cbCtx['errorStatus']
        errorIndex = cbCtx['errorIndex']
        varBinds = cbCtx['varBinds']
        if errorIndication is not None or errorStatus:
            raise SNMPException(errorIndication=errorIndication,
                                errorStatus=errorStatus,
                                errorIndex=errorIndex,
                                varBinds=varBinds)
        return varBinds

    simple_varbinds_result = classmethod(simple_varbinds_result)

    def multi_varbinds_result(klass, cbCtx):
        varBindHead, varBindTotalTable, appReturn = cbCtx
        errorIndication = appReturn['errorIndication']
        errorStatus = appReturn['errorStatus']
        errorIndex = appReturn['errorIndex']
        varBindTable = appReturn['varBindTable']
        if errorIndication is not None or errorStatus:
            raise SNMPException(errorIndication=errorIndication,
                                errorStatus=errorStatus,
                                errorIndex=errorIndex,
                                varBindTable=varBindTable)
        varBinds = []
        for sublist in varBindTable:
            varBinds.extend(sublist)
        return varBinds

    multi_varbinds_result = classmethod(multi_varbinds_result)

    def simple_callback(klass, sendRequestHandle, errorIndication, errorStatus,
                        errorIndex, varBinds, cbCtx):
        cbCtx['errorIndication'] = errorIndication
        cbCtx['errorStatus'] = errorStatus
        cbCtx['errorIndex'] = errorIndex
        cbCtx['varBinds'] = varBinds
        # We are done, no more queries:
        return False

    simple_callback = classmethod(simple_callback)

    def multi_callback(klass, sendRequestHandle, errorIndication, errorStatus,
                       errorIndex, varBindTable, cbCtx):
        varBindHead, varBindTotalTable, appReturn = cbCtx
        if errorIndication or errorStatus:
            appReturn['errorIndication'] = errorIndication
            appReturn['errorStatus'] = errorStatus
            appReturn['errorIndex'] = errorIndex
            appReturn['varBindTable'] = varBindTable
            # No more SNMP requests required:
            return False
        else:
            # XXX rows from past the end of the table are possible here.
            varBindTotalTable.extend(varBindTable)
            varBindTableRow = varBindTable[-1]
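            # Inspect the last row returned: if none of its OIDs still fall
            # under the originally requested prefixes, the walk has run past
            # the table and the for-loop's else clause finishes the retrieval.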
            for idx in range(len(varBindTableRow)):
                name, val = varBindTableRow[idx]
                if val is not None and varBindHead[idx].isPrefixOf(name):
                    break
            else:
                appReturn['errorIndication'] = errorIndication
                appReturn['errorStatus'] = errorStatus
                appReturn['errorIndex'] = errorIndex
                appReturn['varBindTable'] = varBindTotalTable
                # No more SNMP requests required:
                return False
        # Continue table retrieval:
        return True

    multi_callback = classmethod(multi_callback)

    def snmp_get_multiple(self, auth_data, transport, *object_names):
        self.__lock.acquire()
        try:
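            # cbCtx is populated by simple_callback(); running the dispatcher
            # blocks until the callback returns False, i.e. the request has
            # completed or failed.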
            cbCtx = {}
            self.__generator.asyncGetCmd(auth_data, transport, object_names,
                                         (self.simple_callback, cbCtx))
            self.__dispatcher()
            var_binds = self.simple_varbinds_result(cbCtx)
            return var_binds
        finally:
            self.__lock.release()
        raise EUnreachableCode()

    def snmp_set_multiple(self, auth_data, transport, var_binds):
        self.__lock.acquire()
        try:
            cbCtx = {}
            self.__generator.asyncSetCmd(auth_data, transport, var_binds,
                                         (self.simple_callback, cbCtx))
            self.__dispatcher()
            self.simple_varbinds_result(cbCtx)
            return
        finally:
            self.__lock.release()
        raise EUnreachableCode()

    def snmp_getbulk_multiple(self, auth_data, transport, non_repeaters,
                              max_repetitions, *object_names):
        self.__lock.acquire()
        try:
            appReturn = {}
            varBindHead = map(
                lambda (x, y): univ.ObjectIdentifier(x + y),
                map((lambda x, g=self.__generator: mibvar.mibNameToOid(
                    g.mibViewController, x)), object_names))
            varBindTotalTable = []
            cbCtx = (varBindHead, varBindTotalTable, appReturn)
            self.__generator.asyncBulkCmd(auth_data, transport, non_repeaters,
                                          max_repetitions, object_names,
                                          (self.multi_callback, cbCtx))
            self.__dispatcher()
            return self.multi_varbinds_result(cbCtx)
        finally:
            self.__lock.release()
        raise EUnreachableCode()

    def snmp_getnext_multiple(self, auth_data, transport, *object_names):
        self.__lock.acquire()
        try:
            appReturn = {}
            varBindHead = map(
                lambda (x, y): univ.ObjectIdentifier(x + y),
                map((lambda x, g=self.__generator: mibvar.mibNameToOid(
                    g.mibViewController, x)), object_names))
            varBindTotalTable = []
            cbCtx = (varBindHead, varBindTotalTable, appReturn)
            self.__generator.asyncNextCmd(auth_data, transport, object_names,
                                          (self.multi_callback, cbCtx))
            # Work around Eaton Powerware UPS's incorrect end-of-getnext packet.
            try:
                self.__dispatcher()
            except error.ProtocolError, e:
                # Some Eaton Powerware UPS SNMP stacks respond with an out
                # of range error-index (2) for the final getnext response
                # of NoSuchName.
                if not varBindTotalTable:
                    msglog.log(
                        "SNMP", msglog.types.WARN,
                        "Caught ProtocolError with empty"
                        " varBindTotalTable, re-raising.")
                    msglog.log("SNMP", msglog.types.DB, "cbCtx: %r" % cbCtx)
                    raise
                msglog.exception()
                msglog.log(
                    "SNMP", msglog.types.WARN,
                    "Ignoring protocol error to allow (partial)"
                    " discovery.")
                appReturn['errorIndication'] = None
                appReturn['errorStatus'] = None
                appReturn['errorIndex'] = 0
                appReturn['varBindTable'] = varBindTotalTable
            return self.multi_varbinds_result(cbCtx)
        finally:
            self.__lock.release()
        raise EUnreachableCode()
Example #49
0
class DeviceRT(DeviceRS5):
    def __init__(self):
        self.rt_request_obj = device.real_time_value_req()
        self.rt_response_obj = device.real_time_value_res()
        self.cr_request_obj = device.control_relay_req()
        self.cr_response_obj = device.control_relay_res()
        self.rt_lock = Lock()
        self.cr_lock = Lock()
        self.rt_last_updated = 0
        super(DeviceRT, self).__init__()

    def configure(self, config):
        set_attribute(self, 'password', '11111100', config, str)
        super(DeviceRT, self).configure(config)

    def start(self):
        if self.is_running():
            raise EAlreadyRunning()
        #bug: CSCts88534
        if self.parent.parent.baud == 9600:
            self.response_obj = device.readmeterres()
        #bug ended
        self.configure_rt_packet()
        self.configure_cr_packet()
        super(DeviceRT, self).start()

    def configure_rt_packet(self):
        req_addr = self.rt_request_obj.findChildByName('addr')
        req_bcc = self.rt_request_obj.findChildByName('bcc')
        req_addr.setValue(self.bin_addr)
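        # The BCC is the low byte of a fixed frame constant (0xC3 here, 0xD9
        # for the response in _read_rt_value) plus the sum of the address bytes.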
        checksum = (0xC3 + calc_sum(self.bin_addr)) & 0xFF
        req_bcc.setValue(checksum)

    def configure_cr_packet(self):
        req_addr = self.cr_request_obj.findChildByName('addr')
        req_pwd = self.cr_request_obj.findChildByName('pwd')
        password = format_password(self.password)
        req_addr.setValue(self.bin_addr)
        req_pwd.setValue(password)
        self.cr_checksum = (0x58 + calc_sum(self.bin_addr) +
                            calc_sum(password)) & 0xFF

    def _read_rt_value(self, wait_time=None, numretries=None):
        """Don't use Me.

        """
        if numretries is None:
            numretries = self.retry_count
        while numretries:
            try:
                self._send_request(self.rt_request_obj, self.rt_response_obj,
                                   wait_time, 1)
                resp_addr = self.rt_response_obj.findChildByName('addr')
                resp_data = self.rt_response_obj.findChildByName('data')
                resp_bcc = self.rt_response_obj.findChildByName('bcc')
                addr = resp_addr.getValue()
                data = resp_data.getValue()
                bcc = resp_bcc.getValue()
                checksum = (0xD9 + calc_sum(addr) + calc_sum(data)) & 0xFF
                if checksum != bcc:
                    raise EBadChecksum()
                if self.bin_addr != addr:
                    raise EInvalidMessage()

                values = {}
                values['EM_RecMeterValue'] = format_rt_reading(data[0:4])
                values['EM_SenMeterValue'] = format_rt_reading(data[4:8])
                values['EM_RecActivePower'] = format_rt_reading(data[8:12])
                values['EM_SenActivePower'] = format_rt_reading(data[12:16])
                values['EM_RecPassive1MeterValue'] = format_rt_reading(
                    data[16:20])
                values['EM_RecPassive2MeterValue'] = format_rt_reading(
                    data[20:24])
                values['EM_RecPassivePower1'] = format_rt_reading(data[24:28])
                values['EM_RecPassivePower2'] = format_rt_reading(data[28:32])
                values['EM_SenPassive1MeterValue'] = format_rt_reading(
                    data[32:36])
                values['EM_SenPassive2MeterValue'] = format_rt_reading(
                    data[36:40])
                values['EM_SenPassivePower1'] = format_rt_reading(data[40:44])
                values['EM_SenPassivePower2'] = format_rt_reading(data[44:48])

                #have to rename voltages and currents
                values['EM_Voltage_1'] = format_rt_reading(data[48:52])
                values['EM_Voltage_2'] = format_rt_reading(data[52:56])
                values['EM_Voltage_3'] = format_rt_reading(data[56:60])
                values['EM_Current_1'] = format_rt_reading(data[60:64])
                values['EM_Current_2'] = format_rt_reading(data[64:68])
                values['EM_Current_3'] = format_rt_reading(data[68:72])
                values['EM_Phase_1'] = format_rt_reading(data[72:76])
                values['EM_Phase_2'] = format_rt_reading(data[76:80])
                values['EM_Phase_3'] = format_rt_reading(data[80:84])
                values['EM_Hz'] = format_rt_reading(data[84:88])
                values['EM_PowerRelay'] = format_rt_reading(data[88:92])

                values['WM_MeterValue'] = format_rt_reading(data[92:96])
                values['WM_MeterPower'] = format_rt_reading(data[96:100])
                values['HM_MeterValue'] = format_rt_reading(data[100:104])
                values['HM_MeterPower'] = format_rt_reading(data[104:108])
                values['GM_MeterValue'] = format_rt_reading(data[108:112])
                values['GM_MeterPower'] = format_rt_reading(data[112:116])
                values['CM_MeterValue'] = format_rt_reading(data[116:120])
                values['CM_MeterPower'] = format_rt_reading(data[120:124])
                self.update_rt_value(values)
                return
            except:
                numretries -= 1
        raise

    def _write_cr_value(self, value, wait_time=None, numretries=None):
        """Only to be used by set_cr_value method

        Value should only be an integer
        """
        if not self.is_running():
            raise ENotRunning()
        if numretries is None:
            numretries = self.retry_count
        data = self.cr_request_obj.findChildByName('data')
        bcc = self.cr_request_obj.findChildByName('bcc')
        checksum = (self.cr_checksum + value) & 0xFF
        data.setValue(value)
        bcc.setValue(checksum)
        while numretries:
            try:
                self._send_request(self.cr_request_obj, self.cr_response_obj,
                                   wait_time, 1)
                resp_addr = self.cr_response_obj.findChildByName('addr')
                resp_bcc = self.cr_response_obj.findChildByName('bcc')
                addr = resp_addr.getValue()
                bcc = resp_bcc.getValue()
                checksum = (0xD3 + calc_sum(addr)) & 0xFF
                if checksum != bcc:
                    raise EBadChecksum()
                if self.bin_addr != addr:
                    raise EInvalidMessage()
                return
            except:
                numretries -= 1
        raise

    def get_rt_value_as_dict(self):
        return self.rt_value

    def update_rt_value(self, value):
        self.rt_value = value
        self.rt_last_updated = time.time()

    def is_rt_value_stale(self):
        return ((time.time() - self.rt_last_updated) > self.cache_life)

    def get_rt_value_by_name(self, name, skipcache=0):
        """get method for rt values
        
        name is case sensitive
        """
        self.rt_lock.acquire()
        try:
            if self.is_rt_value_stale() or skipcache:
                self._read_rt_value()
        finally:
            self.rt_lock.release()
        return self.rt_value[name]

    def set_cr_value(self, value):
        self.cr_lock.acquire()
        try:
            self._write_cr_value(value)
        finally:
            self.cr_lock.release()
Example #50
0
class Kwh2Kw(CompositeNode):
    def __init__(self):
        self._history = None
        self._history_lock = Lock()
        self._sid = None
        self._nid = 1
        self._poll_failure = False
        self._scheduled = None
        self.running = False
        super(Kwh2Kw, self).__init__()
        return

    def configure(self, cd):
        super(Kwh2Kw, self).configure(cd)
        set_attribute(self, 'link', REQUIRED, cd)
        # sample_period and window used to set the number of
        # samples that constitutes the size of the moving avg.
        set_attribute(self, 'sample_period', 10.0, cd, float)
        set_attribute(self, 'window', 120, cd, int)
        self._window_size = self.window / self.sample_period
        if self.running:
            # reconfigure things
            self.stop()
            self.start()
        return

    def configuration(self):
        cd = super(Kwh2Kw, self).configuration()
        get_attribute(self, 'link', cd)
        get_attribute(self, 'sample_period', cd)
        get_attribute(self, 'window', cd)
        return cd

    def start(self):
        super(Kwh2Kw, self).start()
        self.running = True
        self._history = KwList(self._window_size)
        self._sid = SM.create_polled({self._nid: self.link})
        # retrieve an initial value to start things off
        value = ts = None
        result = SM.poll_all(self._sid)
        if result is None:
            # still waiting
            try:
                value = as_node(self.link).get()
                ts = time.time()
            except:
                pass
        else:
            try:
                value = result[self._nid]['value']
                ts = result[self._nid]['timestamp']
            except:
                pass
            if isinstance(value, MpxException):
                value = None
        if value is not None and ts is not None:
            self._history.add(value, ts)
        self._scheduled = scheduler.seconds_from_now_do(
            self.sample_period, self.run_update)
        return

    def stop(self):
        self.running = False
        self._history = None
        try:
            SM.destroy(self._sid)
        except:
            pass
        self._sid = None
        self._history_lock.acquire()
        try:
            self._history = None
            s = self._scheduled
            self._scheduled = None
            if s is not None:
                try:
                    s.cancel()
                except:
                    pass
        finally:
            self._history_lock.release()
        return

    ##
    # update() can be relatively slow, run it on a threadpool
    def run_update(self):
        NORMAL.queue_noresult(self.update)
        return

    def update(self):
        try:
            value = ts = None
            result = SM.poll_all(self._sid)
            if result is not None:
                value = result[self._nid]['value']
                ts = result[self._nid]['timestamp']
            self._history_lock.acquire()
            try:
                if value is None or isinstance(value, MpxException):
                    # there were problems collecting during this period,
                    # our calculation should not proceed
                    self._history.clear()
                    if not self._poll_failure:
                        # log the failure, but don't spam the msglog
                        self._poll_failure = True
                        msglog.log(
                            'Kwh2Kw', msglog.types.WARN,
                            'Failed to retrieve data from %s' % self.link)
                else:
                    self._poll_failure = False
                    self._history.add(value, ts)
            finally:
                self._history_lock.release()
        except:
            msglog.exception()
        self._scheduled = scheduler.seconds_from_now_do(
            self.sample_period, self.run_update)
        return

    def get(self, skipCache=0):
        return self._history.moving_average()
Example #51
0
class CBus(Node):
    __node_id__ = "50b9f586-fe01-4c1b-a92a-dc5b27b7567e"
    PROMPT = "ready>\n"

    def __init__(self):
        Node.__init__(self)
        self._dev_name = None  # Can programmatically override BEFORE start.
        self._lock = Lock()
        self._popen = None
        self._apps = []
        self._groups = []
        self._discovery_ts = None
        return

    def _get_dev_name(self):
        if self._dev_name:
            return self._dev_name
        self._dev_name = self.parent.dev
        return self._dev_name

    def configure(self, config):
        Node.configure(self, config)
        set_attribute(self, "auto_discover", 1, config, as_boolean)
        set_attribute(self, "debug", 0, config, as_boolean)
        return

    def configuration(self):
        config = Node.configuration(self)
        get_attribute(self, "auto_discover", config)
        get_attribute(self, "debug", config)
        return config

    def _restart(self):
        assert self._lock.locked()
        self._reset_popen()
        self._popen = popen2.Popen3(
            "superexec %s %s" % (CBUS_CLI, self._get_dev_name()), False)
        line = self._popen.fromchild.readline()
        if self.debug:
            print "< %r" % line
        while line and line != CBus.PROMPT:
            line = self._popen.fromchild.readline()
            if self.debug:
                print "< %r" % line
        if self.auto_discover:
            # This is really a work around to how CBM discovers initial values.
            self._discover()
        return

    def start(self):
        self._lock.acquire()
        try:
            self._restart()
        finally:
            self._lock.release()
        Node.start(self)
        return

    def stop(self):
        self._lock.acquire()
        try:
            self._reset_popen()
            self._dev_name = None
            self._discovery_ts = None
        finally:
            self._lock.release()
        Node.stop(self)
        return

    def _reset_popen(self):
        if self._popen is not None:
            try:
                os.kill(self._popen.pid, signal.SIGTERM)
            except OSError, e:
                if e.errno != errno.ESRCH:
                    raise
            try:
                self._popen.wait()
            except OSError, e:
                if e.errno != errno.ECHILD:
                    raise
            self._popen = None
Example #52
0
class _User(PersistentDataObject):
    USERS = _UserDictionary()

    def __init__(self,
                 name,
                 new=0,
                 password_file=PASSWD_FILE,
                 group_file=GROUP_FILE,
                 shadow_file=SHADOW_FILE):
        self.__lock = Lock()
        self.__password_file = password_file
        self.__shadow_file = shadow_file
        self.__group_file = group_file
        self.__loaded = 0
        self.__file_modified = 0
        self.load(name)
        self.meta = {}
        self.USERS.load()
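        # USERS maps user names to stable UUID strings; that UUID is then used
        # as the key for this object's persistent metadata store.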
        if not self.USERS.has_key(self.name()):
            msglog.log('broadway', msglog.types.INFO,
                       ('No profile for user %s found, creating'
                        ' new profile' % name))
            self.USERS[self.name()] = str(UUID())
        PersistentDataObject.__init__(self, self.USERS[self.name()])
        PersistentDataObject.load(self)

    def loaded(self):
        self.__lock.acquire()
        try:
            return self.__loaded
        finally:
            self.__lock.release()

    def load(self, name):
        self.__lock.acquire()
        try:
            passwd_db = PasswdFile(self.__password_file)
            passwd_db.load()
            if name in passwd_db:
                self.__user = passwd_db[name]
            else:
                self.__user = None
                raise EInvalidValue('name', name, 'No such user.')
            self.__file_modified = passwd_db.last_modified()

            # loading /etc/shadow database
            shadow_db = ShadowFile(self.__shadow_file)
            shadow_db.load()
            if name in shadow_db:
                self.__shadow = shadow_db[name]
            else:
                self.__shadow = None
                raise EInvalidValue('name', name,
                                    'No such user in the shadow file.')
            self.__shadow_file_modified = shadow_db.last_modified()

            self.__loaded = 1
        finally:
            self.__lock.release()

    def reload(self):
        self.load(self.name())

    def save(self):
        self.__lock.acquire()
        try:
            passwd_db = PasswdFile(self.__password_file)
            passwd_db.load()
            passwd_db[self.name()] = self.password_entry()
            passwd_db.save()

            # save /etc/shadow content
            shadow_db = ShadowFile(self.__shadow_file)
            shadow_db.load()
            shadow_db[self.name()] = self.shadow_entry()
            shadow_db.save()
        finally:
            self.__lock.release()
        self.load(self.name())

    def name(self):
        return self.__user.user()

    def password(self):
        raise ENotImplemented(self.password)

    def set_password(self, password, validate=True):
        self.__lock.acquire()
        try:
            shadow_db = ShadowFile(self.__shadow_file)
            shadow_db.load()
            shadowentry = shadow_db[self.name()]
            shadowentry.passwd(password, validate)
            shadow_db[self.name()] = shadowentry
            shadow_db.save()
        finally:
            self.__lock.release()
        self.load(self.name())

    def crypt(self):
        return self.__shadow.crypt()

    def set_crypt(self, crypt):
        self.__shadow.crypt(crypt)

    def uid(self):
        return self.__user.uid()

    def set_uid(self, uid):
        self.__user.uid(uid)

    def gid(self):
        return self.__user.gid()

    def set_gid(self, gid):
        self.__user.gid(gid)

    def group(self):
        return self.__user.groups()[0]

    def groups(self):
        group_db = GroupFile(self.__group_file)
        group_db.load()
        return self.__user.groups(group_db)

    def group_ids(self):
        ids = []
        for group in self.groups():
            ids.append(group.gid())
        return ids

    def gecos(self):
        return self.__user.gecos()

    def set_gecos(self, gecos):
        self.__user.gecos(gecos)

    def directory(self):
        return self.__user.directory()

    def set_directory(self, directory):
        self.__user.directory(directory)

    def shell(self):
        return self.__user.shell()

    def set_shell(self, shell):
        self.__user.shell(shell)

    def is_dirty(self):
        if not self.__loaded:
            return 1
        self.__lock.acquire()
        try:
            passwd_db = PasswdFile(self.__password_file)
            if not passwd_db.exists():
                return 1
            if passwd_db.last_modified() > self.__file_modified:
                return 1
            # The shadow file is checked as well; either file changing marks
            # the cached data as dirty.
            shadow_db = ShadowFile(self.__shadow_file)
            if not shadow_db.exists():
                return 1
            return not not (shadow_db.last_modified() >
                            self.__shadow_file_modified)
        finally:
            self.__lock.release()

    def set_meta_value(self, name, value):
        self.meta[name] = value
        PersistentDataObject.save(self)

    def get_meta_value(self, name, default=None):
        if self.meta.has_key(name):
            return self.meta[name]
        return default

    def get_meta(self):
        return self.meta.copy()

    def __getitem__(self, name):
        return self.get_meta_value(name)

    def __setitem__(self, name, value):
        return self.set_meta_value(name, value)

    def has_key(self, k):
        return self.meta.has_key(k)

    def items(self):
        return self.meta.items()

    def values(self):
        return self.meta.values()

    def keys(self):
        return self.meta.keys()

    def password_entry(self):
        return self.__user

    def shadow_entry(self):
        return self.__shadow

    def user_type(self):
        return self.__user.user_type()

    def password_expired(self):
        if self.crypt()[:3] == '$1$':
            return False
        else:
            raise EPasswordExpired(self.name())
        return True

    def is_admin(self):
        return self.user_type() == "mpxadmin"
Example #53
0
class CBus(Node):
    __node_id__ = "50b9f586-fe01-4c1b-a92a-dc5b27b7567e"
    PROMPT = "ready>\n"

    def __init__(self):
        Node.__init__(self)
        self._dev_name = None  # Can programmatically override BEFORE start.
        self._lock = Lock()
        self._popen = None
        self._apps = []
        self._groups = []
        self._discovery_ts = None
        return

    def _get_dev_name(self):
        if self._dev_name:
            return self._dev_name
        self._dev_name = self.parent.dev
        return self._dev_name

    def configure(self, config):
        Node.configure(self, config)
        set_attribute(self, 'auto_discover', 1, config, as_boolean)
        set_attribute(self, 'debug', 0, config, as_boolean)
        return

    def configuration(self):
        config = Node.configuration(self)
        get_attribute(self, 'auto_discover', config)
        get_attribute(self, 'debug', config)
        return config

    def _restart(self):
        assert self._lock.locked()
        self._reset_popen()
        self._popen = popen2.Popen3(
            "superexec %s %s" % (CBUS_CLI, self._get_dev_name()), False)
        line = self._popen.fromchild.readline()
        if self.debug:
            print "< %r" % line
        while (line and line != CBus.PROMPT):
            line = self._popen.fromchild.readline()
            if self.debug:
                print "< %r" % line
        if self.auto_discover:
            # This is really a work around to how CBM discovers initial values.
            self._discover()
        return

    def start(self):
        self._lock.acquire()
        try:
            self._restart()
        finally:
            self._lock.release()
        Node.start(self)
        return

    def stop(self):
        self._lock.acquire()
        try:
            self._reset_popen()
            self._dev_name = None
            self._discovery_ts = None
        finally:
            self._lock.release()
        Node.stop(self)
        return

    def _reset_popen(self):
        if (self._popen is not None):
            try:
                os.kill(self._popen.pid, signal.SIGTERM)
            except OSError, e:
                if e.errno != errno.ESRCH:
                    raise
            try:
                self._popen.wait()
            except OSError, e:
                if e.errno != errno.ECHILD:
                    raise
            self._popen = None
Example #54
0
class Device(CompositeNode, UpdateMixin):
    def __init__(self):
        super(Device, self).__init__()
        self._subscription_lock = Lock()
        self._subscribed = 0
        self._subscribers = {}
        self._last_value = None
        self._last_rcvd = None
        self._decode_indexes = {}
        return

    def configure(self, cd):
        super(Device, self).configure(cd)
        set_attribute(self, 'ttl', 60, cd, int)
        set_attribute(self, 'swid', '', cd)
        return

    def configuration(self):
        cd = super(Device, self).configuration()
        get_attribute(self, 'ttl', cd)
        get_attribute(self, 'swid', cd)
        return cd

    def start(self):
        if self.swid:
            self.url = BASE_URL % (self.station.host, self.swid)
            self._rqst = JaceRequest(self.url, ttl=self.ttl)
        super(Device, self).start()
        return

    def can_bundle(self):
        return bool(self.swid)

    def subscribe(self, name, func):
        self._subscription_lock.acquire()
        try:
            ##
            # if there are multiple external consumers, they are subscribed
            # via event producing child node.
            self._subscribers[name] = func
            self._subscribed += 1
            if self._last_value and (uptime.secs() -
                                     self._last_rcvd) < self.ttl:
                try:
                    value = self._last_value.get(name)
                    func(value)
                except:
                    pass
            if self._subscribed == 1:
                self.update_continuous(None)
        finally:
            self._subscription_lock.release()
        return

    def unsubscribe(self, name):
        self._subscription_lock.acquire()
        try:
            assert self._subscribed, 'Cannot decrement subscribers below 0'
            del self._subscribers[name]
            self._subscribed -= 1
        finally:
            self._subscription_lock.release()
        return

    def event_has_subscribers(self):
        return bool(self._subscribed)

    def _load_indexes(self, data):
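        # Record, for each subscribed point, the line index of its <name> tag
        # and the offset from there to the following <value> line so decode()
        # can pull values out without re-parsing the whole response.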
        d_len = len(data)
        for name in self._subscribers.keys():
            index = offset = 0
            for l in data:
                if l.count('<' + name + '>'):
                    break
                index += 1
            for l in data[index:]:
                if l.count('<value>'):
                    break
                offset += 1
            if (index + offset) > d_len:
                index = offset = None
            self._decode_indexes[name] = (index, offset)
        return

    def _get_indexes(self, name):
        return self._decode_indexes.get(name, (None, None))

    def _have_indexes(self):
        indexes = self._decode_indexes.keys()
        for interest in self._subscribers.keys():
            if interest not in indexes:
                return False
        return True

    def decode(self, data_s):
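        # Fast path: slice values out of the raw text using the cached line
        # indexes; any mismatch falls back to _decode_slow().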
        if data_s.startswith('<!--'):
            data_s = data_s[(data_s[1:].find('<') + 1):]
        data = data_s.split('\n')
        if not self._have_indexes():
            self._load_indexes(data)
        values = {}
        for name in self._subscribers.keys():
            index, offset = self._get_indexes(name)
            try:
                if not data[index].count(name) or not data[
                        index + offset].count('value'):
                    return self._decode_slow(data_s)
                l = data[index + offset]
                values[name] = l.split('>')[1].split('<')[0]
            except:
                return self._decode_slow(data_s)
        return values

    def _decode_slow(self, data):
        try:
            data_o = xml2code(data)
        except:
            data_o = None
        return data_o

    def update_cache(self, value_obj):
        for name, func in self._subscribers.items():
            value = value_obj.get(name)
            func(value)
        self._last_value = value_obj
        self._last_rcvd = uptime.secs()
        return

    def _get_station(self):
        return self.parent

    station = property(_get_station)
Example #55
0
class EnergywiseSwitch(Node):
    SNMP_REMOTE_AGENTS_PATH = '/services/network/SNMP/Remote Agents'
    CEW_ENT_ENTRY_SUBPATH = (
        'Managed Objects/iso/org/dod/internet/private/enterprises/cisco/'
        'ciscoMgmt/ciscoEnergywiseMIB/ciscoEnergywiseMIBObjects/cewEntTable/'
        'cewEntEntry'
        )
    PROTOCOL_SNMP = 'SNMP'
    PROTOCOL_NATIVE = 'Native'
    _PROTOCOL_MAP = {'snmp':PROTOCOL_SNMP,'native':PROTOCOL_NATIVE}
   
    def get_cewEntEnergyUsage_node(self):
        usage_url = os.path.join(self.SNMP_REMOTE_AGENTS_PATH,
                                self.name,
                                self.CEW_ENT_ENTRY_SUBPATH,
                                'cewEntEnergyUsage')
        return as_node(usage_url)
    def get_cewEntEnergyUnits_node(self):
        units_url = os.path.join(self.SNMP_REMOTE_AGENTS_PATH,
                                 self.name,
                                 self.CEW_ENT_ENTRY_SUBPATH,
                                 'cewEntEnergyUnits')
        return as_node(units_url)
    def get_snmp_switch_agent_node(self):
        snmp_switch_url = os.path.join(self.SNMP_REMOTE_AGENTS_PATH,
                                       self.name)
        return as_node(snmp_switch_url)
    def _not_running(self, *args, **kw):
        raise ENotRunning("%r is not running." % self.as_node_url())

    def as_protocol_name(self,protocol):
        try:
            return self._PROTOCOL_MAP[protocol.lower()]
        except KeyError:
            raise EInvalidValue('protocol',protocol,
                                "%r is not a supported protocol for %r" 
                                %(protocol,self.as_node_url())
                                )
        
    #
    # Initialize all parameters for energywise switch
    def __init__(self):
        Node.__init__(self)
        self._cpex_connect_orig = self._cpex_connect
        self._cpex_connect = self._not_running
        self.running = 0
        self.domain = ''
        self.address = REQUIRED
        self.snmp_version = REQUIRED
        self.snmp_batch_size = 50
        self.community_name = REQUIRED
        self.security_name = None
        self.username = None
        self.authentication_protocol = None
        self.authentication_key = None
        self.privacy_protocol = None
        self.privacy_key = None
        self.remote_agent = None
        self.trend_node = None
        self.period = 60
        self.debug = 0
        self.shared_secret = REQUIRED
        self.cpex_port = CPEX_DEFAULT_PORT
        self.primary = False
        self.protocol = 'SNMP'
        self.cpex_connect_retry = 60
        self.cpex_timeout = 1 # 12
        self.get_switch_usage = self._not_running
        self.cewEntEnergyUsage_node = None
        self.cewEntEnergyUnits_node = None
        self.snmp_usage_map = None
        self.snmp_switch_agent_node = None
        self._snmp_cache_lock=Lock()
        self._snmp_cache_value=None
        self._snmp_cache_time=0
        self.ttl=30 
        return
    # The following parameters are passed in from the configuration at startup.
    def configure(self, config):
        if self.debug:
            msg = 'Inside Configure api '
            msglog.log('Energywise:', msglog.types.INFO, msg )
            msg = 'sys path %s'
            msglog.log('Energywise:', msglog.types.INFO, msg %sys.path)
        Node.configure(self, config)
        set_attribute(self, 'ttl', 30, config, int)
        set_attribute(self, 'debug', 0, config, int)
        set_attribute(self, 'address', REQUIRED, config)
        set_attribute(self, 'shared_secret', REQUIRED, config)
        set_attribute(self, 'cpex_port', 43440, config, int)
        set_attribute(self, 'primary', False, config, as_boolean)
        #SNMP is taken as default for reverse compatibility
        set_attribute(self, 'protocol', 'SNMP', config, 
                      self.as_protocol_name)
        set_attribute(self, 'snmp_batch_size', 50, config, int)
        if self.debug:
            msg = 'Configured address  %s '
            msglog.log('Energywise:', msglog.types.INFO, msg %self.address)
        if not self.domain:
            self.domain = _find_domain(self)
            if self.debug:
                msg = 'Configured CPEX domain %s '
                msglog.log('Energywise:', msglog.types.INFO, msg %self.domain)
        set_attribute(self,'snmp_version', REQUIRED, config)
        if self.debug:
            msg = 'Configured snmp_version %s'
            msglog.log('Energywise:', msglog.types.INFO, msg %self.snmp_version)
        set_attribute(self,'community_name',REQUIRED, config)
        if self.debug:
            msg = 'Configured community_name %s'
            msglog.log('Energywise:', msglog.types.INFO,
                                      msg %self.community_name)
        set_attribute(self,'security_name','default', config)
        if self.debug:
            msg = 'Configured security_name %s'
            msglog.log('Energywise:', msglog.types.INFO,
                                      msg %self.security_name)
        set_attribute(self,'user_name',' ', config)
        set_attribute(self,'authentication_protocol','usmNoAuthProtocol',
                      config)
        set_attribute(self,'authentication_key',' ', config)
        set_attribute(self,'privacy_protocol','usmNoPrivProtocol', config)
        set_attribute(self,'privacy_key', ' ',config)
        return
    def configuration(self):
        if self.debug:
            msg = 'Inside Configuration()'
            msglog.log('Energywise:', msglog.types.INFO, msg)
        config = Node.configuration(self)
        get_attribute(self, 'ttl', config, str)
        get_attribute(self, 'debug', config, str)
        get_attribute(self, 'domain', config)
        get_attribute(self, 'address', config)
        get_attribute(self, 'snmp_version', config)
        get_attribute(self, 'snmp_batch_size', config, str)
        get_attribute(self, 'community_name', config)
        get_attribute(self, 'security_name', config)
        get_attribute(self, 'user_name', config)
        get_attribute(self, 'authentication_protocol', config)
        get_attribute(self, 'authentication_key', config)
        get_attribute(self, 'privacy_protocol', config)
        get_attribute(self, 'privacy_key', config)
        get_attribute(self, 'shared_secret', config)
        get_attribute(self, 'cpex_port', config)
        get_attribute(self, 'primary', config)
        get_attribute(self, 'protocol', config)
        return config
    def start(self):
        if self.debug:
            msg = 'Inside Start()'
            msglog.log('Energywise:', msglog.types.INFO, msg)
        if not self.running:
            if not (1024 < self.cpex_port < 65536):
                raise EConfiguration(
                    "Invalid port specified (%d). "
                    "Please enter values between 1025 and 65535 " 
                    % self.cpex_port
                    )
            self.running = 1
            self._cpex_connect = self._cpex_connect_orig
            Node.start(self)
            if self.PROTOCOL_SNMP == self.protocol:
                # Create SNMP node for this remote_agent
                self.createEnergywiseSNMPNodes()
                self.snmp_switch_agent_node = self.get_snmp_switch_agent_node()
                self.cewEntEnergyUsage_node = self.get_cewEntEnergyUsage_node()
                self.cewEntEnergyUnits_node = self.get_cewEntEnergyUnits_node()
                self.snmp_usage_map = {}
                for child in self.cewEntEnergyUsage_node.children_nodes():
                    self.snmp_usage_map[('usage', child.name)] = child
                for child in self.cewEntEnergyUnits_node.children_nodes():
                    self.snmp_usage_map[('units', child.name)] = child
                self.get_switch_usage = self.snmp_switch_usage
            else:
                self.get_switch_usage = self.cpex_switch_usage
        return
    def stop(self):
        if self.debug:
            msg = 'Inside Stop()'
            msglog.log('Energywise:', msglog.types.INFO, msg)
        if self.running:
            Node.stop(self)
            self.running = 0
            self.get_switch_usage = self._not_running
        return
    
    # Create all required mib and snmp nodes automatically for monitoring app.
    def createEnergywiseSNMPNodes(self):
        if self.debug:
            msglog.log('Energywise:',msglog.types.INFO,
                'Inside createEnergywiseSNMPNodes')
        try:
            # check SNMP node is present
            snmpRef = as_node("/services/network/SNMP")
        except:
            msglog.log('Energywise:',msglog.types.ERR,
                'Missing SNMP node under /services/network, please create it.')
        snmpNode = node_factory(
             "mpx.service.network.snmp.remote_agent.RemoteAgent")
        snmpNode.configure({
                'parent':"/services/network/SNMP/Remote Agents",
                'name': self.name,
                'address':self.address,
                'mib_table':[{'name':'CISCO-ENERGYWISE-MIB',
                          'value':"CISCO-ENERGYWISE-MIB"}],
                'port':161,
                'discover_at_start':0,
                'max_batch_size':self.snmp_batch_size,
                })
        # Create the SNMP version node under snmpNode.
        if self.snmp_version == 'v1':
            version = '1'
            snmpVersionNode = node_factory(
                "mpx.service.network.snmp.remote_agent.SNMPv1"
                )
            parent_name = "/services/network/SNMP/Remote Agents/"
            parent_name += self.name
            snmpVersionNode.configure({
                'parent':parent_name,
                'name':"SNMPv1",
                'version':version,
                'community_name':self.community_name,
                'security_name':self.security_name
                })
        if self.snmp_version == 'v2c':
            version = '2c'
            snmpVersionNode = node_factory(
                "mpx.service.network.snmp.remote_agent.SNMPv2c")

            parent_name = "/services/network/SNMP/Remote Agents/"
            parent_name += self.name
            snmpVersionNode.configure({
                'parent':parent_name,
                'name':"SNMPv2c",
                'version':version,
                'community_name':self.community_name,
                'security_name':self.security_name
                })
        if self.snmp_version == 'v3':
            version = '3'
            snmpVersionNode = node_factory(
                "mpx.service.network.snmp.remote_agent.SNMPv3")

            parent_name = "/services/network/SNMP/Remote Agents/"
            parent_name += self.name
            snmpVersionNode.configure({
                'parent':parent_name,
                'name':"SNMPv3",
                'version':version,
                'username':self.username,
                'authentication_protocol':self.authentication_protocol,
                'authentication_key':self.authentication_key,
                'privacy_protocol':self.privacy_protocol,
                'privacy_key':self.privacy_key })
        snmpNode.start()
        # Start discovery of interested oids
        managedObjects = snmpNode.get_child(snmpNode.MANAGED_OBJECTS)
        seek_from = (1,3,6,1,4,1,9,9,683,1,3) # CISCO-ENERGYWISE::cewDomainName
        managedObjects.batch_all_from(seek_from,True)
        mib_node = parent_name
        mib_node += '/Managed Objects/iso/org/dod/internet/private/'\
            'enterprises/cisco/ciscoMgmt/ciscoEnergywiseMIB/'\
            'ciscoEnergywiseMIBObjects/cewDomainName/0'
        domainNode = as_node(mib_node)
        val = domainNode.get()
        if not str(val) == self.domain:
            raise EInvalidValue('self.domain',self.domain)
        if self.debug:
            msglog.log('Energywise:',msglog.types.INFO,
                'Energywise domain managed by this switch is %s'%str(val))
        seek_from = (1,3,6,1,4,1,9,9,683,1,6) # CISCO-ENERGYWISE::cewEntTable
        managedObjects.batch_all_from(seek_from,True)
        return
   
    def new_trend(self,period):
        return new_trend(self,period)
    def delete_trend(self):
        return delete_trend(self)
  
    def _cpex_connect(self):
        uuid = energywise_utl_createUuid()
        key = energywise_utl_composeKey(self.shared_secret, uuid)
        cpex_session = energywise_createSession(
            self.address, self.cpex_port, uuid, key, self.cpex_timeout
            )
        if cpex_session == 0:
            raise EConnectFailed(
                "Failed to create a session with %(address)s:%(cpex_port)d" %
                self.configuration()
                )
        return cpex_session
    def _cpex_disconnect(self, cpex_session):
        energywise_closeSession(cpex_session)
        return
    def cpex_domain_usage(self, importance=100):
        cpex_query = 0
        result_set = 0
        usage = 0
        cpex_session = self._cpex_connect()
        try:
            cpex_query = energywise_createSumQuery(self.domain, importance)
            energywise_addGetAttribute(cpex_query, EW_ATTRIBUTE_TYPE_UNITS)
            energywise_addGetAttribute(cpex_query, EW_ATTRIBUTE_TYPE_USAGE)
            energywise_execQuery(cpex_session, cpex_query)
            result_set = energywise_queryResults(cpex_session, cpex_query)
            if result_set == 0:
                raise EnergywiseCommunicationFailure(
                    "Domain sum query via %r failed." % self.address
                    )
            result_row = energywise_getNextRow(result_set)
            while result_row:
                units = energywise_getAttributeFromRowByType(
                    result_row, EW_ATTRIBUTE_TYPE_UNITS
                    ).value
                usage += energywise_getAttributeFromRowByType(
                    result_row, EW_ATTRIBUTE_TYPE_USAGE
                    ).value * 10**units
                result_row = energywise_getNextRow(result_set)
        finally:
            if result_set != 0:
                energywise_releaseResult(result_set)
            if cpex_query != 0:
                energywise_releaseQuery(cpex_query)
            self._cpex_disconnect(cpex_session)
        return usage
    def cpex_switch_usage_map(self, importance=100):
        cpex_query = 0
        result_set = 0
        usage_map = {}
        cpex_session = self._cpex_connect()
        try:
            cpex_query = energywise_createCollectQuery(self.domain, importance)
            energywise_addGetAttribute(cpex_query, EW_ATTRIBUTE_TYPE_USAGE)
            energywise_addGetAttribute(cpex_query, EW_ATTRIBUTE_TYPE_UNITS)
            energywise_execQuery(cpex_session, cpex_query)
            result_set = energywise_queryResults(cpex_session, cpex_query)
            if result_set == 0:
                raise EnergywiseCommunicationFailure(
                    "Failed to query switch usage for %r via %r" % (self.domain, self.address)
                    )
            result_row = energywise_getNextRow(result_set)
            while result_row:
                units = energywise_getAttributeFromRowByType(
                    result_row, EW_ATTRIBUTE_TYPE_UNITS
                    ).value
                usage = energywise_getAttributeFromRowByType(
                    result_row, EW_ATTRIBUTE_TYPE_USAGE
                    ).value * 10**units
                usage_map[self.address] = usage_map.get(self.address,0) + usage
                result_row = energywise_getNextRow(result_set)
        finally:
            if result_set != 0:
                energywise_releaseResult(result_set)
            if cpex_query != 0:
                energywise_releaseQuery(cpex_query)
            self._cpex_disconnect(cpex_session)
        return usage_map
    def cpex_switch_usage(self, importance=100, skipCache=False):
        try:
            return self.parent.cpex_switch_usage_map(importance, skipCache)[self.address]
        except KeyError:
            raise EnergywiseCommunicationFailure("Failed to get usage for switch %r." % self.address)
        raise EUnreachableCode("Executed unreachable code!")
    def _is_snmp_cache_stale(self,timestamp):
        return (self._snmp_cache_time+self.ttl) < timestamp
    def _update_snmp_cache(self,value, timestamp):
        self._snmp_cache_value = value
        self._snmp_cache_time = timestamp
    #caching for snmp switch
    def snmp_switch_usage(self, importance=100, skipCache=False):
        self._snmp_cache_lock.acquire()
        try:
            if skipCache or self._is_snmp_cache_stale(time.time()):
                #fetch actual value
                if self.PROTOCOL_SNMP != self.protocol:
                    raise ESNMPNotEnabled("SNMP is not enabled on %r" %
                                          self.as_node_url())
                batches = self.snmp_switch_agent_node.create_batches(
                    self.snmp_usage_map
                    )
                result = {}
                for batch in batches:
                    result.update(self.snmp_switch_agent_node.get_batch(batch))
                usage = 0
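                # Each ('usage', idx) result has a matching ('units', idx)
                # entry; units is treated as a base-10 exponent applied to the
                # raw usage value.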
                for usage_key in filter((lambda k:k[0]=='usage'), result.keys()):
                    unit_key = ('units', usage_key[1])
                    usage_value = int(result[usage_key].value)
                    unit_value = int(result[unit_key].value)
                    usage += usage_value*10**unit_value
                self._update_snmp_cache(usage, time.time())
                return usage
            else:
                #use cached value
                return self._snmp_cache_value
        finally:
            self._snmp_cache_lock.release()
        raise EUnreachableCode("Executed unreachable code!")
    def get(self, skipCache=False):
        return self.get_switch_usage()
    def get_result(self, skipCache=False):
        return Result(self.get(skipCache), time.time(), cached=False)
Example #56
0
class XbowCache:
    def __init__(self, timeout, scan_time):
        #self._c == {group_id:{addr:last_msg}}
        self._c = {}
        self._cache_lock = Lock() #can add more granular locking, if need be
        self._subscr_list = {} # == {(group_id,addr):[call_back_meth]}  ... 
        self.timeout = timeout
        if not scan_time:
            scan_time = max(self.timeout / 4, 30)
        self.scan_time = scan_time
        self._scan_scheduled = None
        
    def stop(self):
        s = self._scan_scheduled
        self._scan_scheduled = None
        if s is not None:
            try:
                s.cancel()
            except:
                pass
                
    def add_group(self, group_id):
        # NOTE: add_msg() calls this while _cache_lock is already held, so no
        # locking is done here; re-acquiring a non-reentrant lock at this
        # point would deadlock.
        if not self._c.has_key(group_id):
            self._c[group_id] = {}

    def get_group_ids(self):
        return self._c.keys()
            
    def add_mote(self, group_id, addr):
        try:
            self._cache_lock.acquire()
            if not self._c.has_key(group_id):
                raise EInvalidValue('cannot add addr to non-existant group', group_id, 'add_mote')
            if not self._c[group_id].has_key(addr):
                self._c[group_id][addr] = None
        finally:
            self._cache_lock.release()
            
    def get_mote_ids(self, group_id):
        return self._c[group_id].keys()
            
    def add_msg(self, msg):
        addr = msg.get_address()
        group_id = msg.get_group()
        id = (group_id, addr)
        try:
            self._cache_lock.acquire()
            if group_id not in self.get_group_ids():
                self.add_group(group_id)
            self._c[group_id][addr] = msg
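            # Note: subscriber callbacks run while _cache_lock is still held;
            # a callback that re-acquires the lock would deadlock if the lock
            # is not reentrant.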
            if id in self._subscr_list.keys():
                call_backs = self._subscr_list[(group_id, addr)]
                for cb in call_backs:
                    cb()
        finally:
            self._cache_lock.release() 
            
    def get_msg(self, group_id, addr):
        return self._c[group_id][addr]
                       
    def add_callback(self, id, cb):
        #id == (group_id, addr) tuple
        try:
            self._cache_lock.acquire()
            if id in self._subscr_list.keys():
                self._subscr_list[id].append(cb)
            else:
                self._subscr_list[id] = [cb]
                # if we're the only subscribed value, fire off the timeout scanner
                if len(self._subscr_list.keys()) == 1:
                    self.setup_subscr_timer()
        finally:
            self._cache_lock.release()
            
    def remove_callback(self, id, cb):
        # id == (group_id, addr) tuple
        # cb needed just in case there are more than one callback
        try:
            self._cache_lock.acquire()
            if id in self._subscr_list.keys():
                try:
                    self._subscr_list[id].remove(cb)
                except:
                    # hrm, cb went missing
                    pass
        finally:
            self._cache_lock.release()
            
    def setup_subscr_timer(self):
        self._scan_scheduled = scheduler.at(self.next_scan_time(), self.scan, ())
        
    def next_scan_time(self):
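        # Align scans to wall-clock multiples of scan_time so the periodic
        # timeout sweep stays on a fixed grid instead of drifting.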
        now = int(time.time())
        last_time = (now - (now % self.scan_time))
        next_time = last_time + self.scan_time
        return float(next_time)
        
    def scan(self):
        try:
            # see prev comment about improving lock granularity
            self._cache_lock.acquire()
            for id in self._subscr_list.keys():
                group_id, addr = id
                t = time.time()
                msg = self._c.get(group_id, {}).get(addr)
                # Entries that already timed out (or never reported) hold None
                # and are skipped until a fresh message arrives.
                if msg is not None and (t - msg.time_stamp) > self.timeout:
                    # set the msg to None which will result in the ion side
                    # generating an ETimeout COV msg
                    self._c[group_id][addr] = None
                    for cb in self._subscr_list[id]:
                        try:
                            cb()
                        except:
                            msglog.exception()
            if self._subscr_list.keys():
                self.setup_subscr_timer()
            else:
                self._scan_scheduled = None
        finally:
            self._cache_lock.release()