Example No. 1
 def __init__(self):
     self.__group = None
     self._value = None
     self._old_value = None
     self._last_update = 0
     self.__cv = Condition()
     super(LightPoint, self).__init__()
     EventProducerMixin.__init__(self)
Example No. 2
 def __init__(self):
     CompositeNode.__init__(self)
     EventProducerMixin.__init__(self)
     self._schedule_lock = Lock()
     self._schedule_condition = Condition()
     self._value_lock = Lock()
     self._value_condition = Condition()
     self.__schedule = None
     self.__value = None
Example No. 3
 def __init__(self, timeout=2.0):
     self.timeout = timeout
     self.stations = {}
     self._monitor = monitor.ChannelMonitor(self.timeout)
     self.tm_number = self.tm_counter.increment()
     self._response_tp = ThreadPool(1, 'Jace Response Pool')
     self._pending_responses = Queue()
     self._callbacks = {}
     self._running = False
     self._sync_get_lock = Lock()
     self._last_sync_get = uptime.secs()
     self._cv = Condition()
     ImmortalThread.__init__(self, None, None,
                             'Jace Transaction Manager')
     return
Example No. 4
 def test_cross_thread(self):
     # @note:  This test is relying on the write being large enough to
     #         fill all the OS buffers and block.
     #
     # @note:  Methinks this test relies on too many side effects...
     too_big_for_one_write = 1000000
     some_of_but_not_all_of_it = 65536
     stream = CrossThreadStream()
     cv = Condition()
     t1 = Thread(target=_writer, args=(cv, stream, too_big_for_one_write))
     cv.acquire()
     t1.start()
     # @note:  This pause should cause the _writer to block since it is
     #         trying to write too_big_for_one_write.
     pause(2)
     data = stream.read(some_of_but_not_all_of_it)
     count = len(data)
     self.failUnless(data == 'c'*count and
                     count <= some_of_but_not_all_of_it, 'First read ' + 
                     'failed to return the correct data or returned ' + 
                     'too much data')
     while count < too_big_for_one_write:
         data += stream.read(too_big_for_one_write - count)
         count = len(data)
     self.failUnless(data == 'c'*too_big_for_one_write,
                     'Overall stream did not return ' + 
                     'data written to it correctly or the wrong number')
     self.failUnless(stream.read(100) == '', 'Read did not return empty ' + 
                     'string even though no more data should have been ' + 
                     'waiting and the stream closed')
     cv.wait()
     try:
         self.failIf(_failed, _reason)
     finally:
         cv.release()
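
The test above leans on a module-level _writer helper and on _failed/_reason globals that are not part of the snippet. A minimal sketch of what that helper might look like, assuming the stream object only needs write() and close() and that the Condition is used purely to signal completion back to the test thread (the names and details here are illustrative, not the project's actual implementation):

_failed = False
_reason = ''

def _writer(cv, stream, count):
    # Worker-thread side of the test: push `count` bytes through the stream,
    # record any failure, then wake the test thread blocked in cv.wait().
    global _failed, _reason
    try:
        stream.write('c' * count)   # blocks until the reader drains the data
        stream.close()              # EOF, so the final read() returns ''
    except Exception as e:
        _failed = True
        _reason = 'writer thread raised: %s' % e
    cv.acquire()
    try:
        cv.notify()
    finally:
        cv.release()
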
Example No. 5
 def __init__(self, source_node, timeout=960):
     EventConsumerAbstract.__init__(self)
     self.__node = as_internal_node(source_node)
     self.__cond = Condition()
     self.__event = None
     self.__sched = None  # scheduled action to unsubscribe the point
     self.__timeout = timeout  # number of seconds to maintain subscription
     self._event_received = False
     return
Example No. 6
 def __init__(self, timeout=2.0):
     self.timeout = timeout
     self.stations = {}
     self._monitor = monitor.ChannelMonitor(self.timeout)
     self.tm_number = self.tm_counter.increment()
     self._response_tp = ThreadPool(1, 'Jace Response Pool')
     self._pending_responses = Queue()
     self._callbacks = {}
     self._running = False
     self._sync_get_lock = Lock()
     self._last_sync_get = uptime.secs()
     self._cv = Condition()
     ImmortalThread.__init__(self, None, None, 'Jace Transaction Manager')
     return
Example No. 7
 def test_cross_thread(self):
     # @note:  This test is relying on the write being large enough to
     #         fill all the OS buffers and block.
     #
     # @note:  Methinks this test relies on too many side effects...
     too_big_for_one_write = 1000000
     some_of_but_not_all_of_it = 65536
     stream = CrossThreadStream()
     cv = Condition()
     t1 = Thread(target=_writer, args=(cv, stream, too_big_for_one_write))
     cv.acquire()
     t1.start()
     # @note:  This pause should cause the _writer to block since it is
     #         trying to write too_big_for_one_write.
     pause(2)
     data = stream.read(some_of_but_not_all_of_it)
     count = len(data)
     self.failUnless(
         data == 'c' * count and count <= some_of_but_not_all_of_it,
         'First read ' + 'failed to return the correct data or returned ' +
         'too much data')
     while count < too_big_for_one_write:
         data += stream.read(too_big_for_one_write - count)
         count = len(data)
     self.failUnless(
         data == 'c' * too_big_for_one_write,
         'Overall stream did not return ' +
         'data written to it correctly or the wrong number')
     self.failUnless(
         stream.read(100) == '', 'Read did not return empty ' +
         'string even though no more data should have been ' +
         'waiting and the stream closed')
     cv.wait()
     try:
         self.failIf(_failed, _reason)
     finally:
         cv.release()
Example No. 8
    class _CriticalData:
        def __init__(self):
            self._lock = Lock()
            self._cond = Condition(self._lock)
            self.state = ConnectionMixin.IDLE
            self.connection_count = 0

        def acquire(self):
            return self._lock.acquire()

        def release(self):
            return self._lock.release()

        def wait(self, timeout=None):
            if timeout is not None:
                self._cond.wait(timeout)
            else:
                self._cond.wait()

        def notify(self):
            self._cond.notify()

        def increment_connection_count(self):
            self.connection_count += 1
            self.state = ConnectionMixin.UP
            return self.connection_count

        def decrement_connection_count(self):
            self.connection_count -= 1
            if self.connection_count < 1:
                self.state = ConnectionMixin.DOWN

        def get_state(self):
            return self.state

        def set_state(self, state):
            self.state = state
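
A short sketch of how calling code typically drives a helper like _CriticalData: the waiting side holds the lock and re-checks its predicate in a loop around wait(), while the updating side mutates the counter and then notifies. The ConnectionMixin stand-in below is an assumption made so the sketch runs on its own, and _CriticalData is assumed to be reachable at module scope; the framework's real constants and nesting may differ.

from threading import Thread
from time import sleep

class ConnectionMixin:
    # Stand-in for the framework's connection-state constants.
    IDLE, UP, DOWN = 'idle', 'up', 'down'

def wait_until_up(shared, timeout=1.0):
    # Waiting side: the predicate is re-checked after every wait() so
    # spurious wakeups or an early notify are handled correctly.
    shared.acquire()
    try:
        while shared.get_state() != ConnectionMixin.UP:
            shared.wait(timeout)
    finally:
        shared.release()

def bring_up(shared):
    # Updating side: change the state under the lock, then notify.
    sleep(0.1)
    shared.acquire()
    try:
        shared.increment_connection_count()
        shared.notify()
    finally:
        shared.release()

shared = _CriticalData()                      # the class shown above
Thread(target=bring_up, args=(shared,)).start()
wait_until_up(shared)
print('state is now %s' % shared.get_state())
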
Example No. 9
    class _CriticalData:
        def __init__(self):
            self._lock = Lock()
            self._cond = Condition(self._lock)
            self.state = ConnectionMixin.IDLE
            self.connection_count = 0
        
        def acquire(self):
            return self._lock.acquire()
        
        def release(self):
            return self._lock.release()
        
        def wait(self, timeout=None):
            if timeout is not None:
                self._cond.wait(timeout)
            else:
                self._cond.wait()
                
        def notify(self):
            self._cond.notify()

        def increment_connection_count(self):                        
            self.connection_count += 1
            self.state = ConnectionMixin.UP
            return self.connection_count
    
        def decrement_connection_count(self):
            self.connection_count -= 1
            if self.connection_count < 1:
                self.state = ConnectionMixin.DOWN
            
        def get_state(self):
            return self.state
        
        def set_state(self, state):
            self.state = state
Example No. 10
class Schedule(CompositeNode, EventProducerMixin):
    def __init__(self):
        CompositeNode.__init__(self)
        EventProducerMixin.__init__(self)
        self._schedule_lock = Lock()
        self._schedule_condition = Condition()
        self._value_lock = Lock()
        self._value_condition = Condition()
        self.__schedule = None
        self.__value = None

    def configure(self, config):
        CompositeNode.configure(self, config)

    def configuration(self):
        config = CompositeNode.configuration(self)
        return config

    def start(self):
        CompositeNode.start(self)

    def stop(self):
        CompositeNode.stop(self)

    def set_schedule(self, client, schedule):
        self._schedule_lock.acquire()
        self._schedule_condition.acquire()
        try:
            self.__schedule = schedule
            self._schedule_condition.notifyAll()
        finally:
            self._schedule_lock.release()
            self._schedule_condition.release()
        self.event_generate(ScheduleChangedEvent(client, schedule))

    def get_schedule(self):
        self._schedule_lock.acquire()
        try:
            schedule = self.__schedule
        finally:
            self._schedule_lock.release()
        if isinstance(schedule, Exception):
            raise schedule
        return schedule

    ##
    # @param schedule Schedule client believes to be current.
    def get_next_schedule(self, schedule, timeout=None):
        self._schedule_lock.acquire()
        try:
            if schedule is not self.__schedule:
                return self.__schedule
            self._schedule_condition.acquire()
        finally:
            self._schedule_lock.release()
        try:
            self._schedule_condition.wait(timeout)
            schedule = self.__schedule
        finally:
            self._schedule_condition.release()
        if isinstance(schedule, Exception):
            raise schedule
        return schedule

    def is_schedule_current(self, schedule):
        self._schedule_lock.acquire()
        try:
            changed = schedule is not self.__schedule
        finally:
            self._schedule_lock.release()
        return changed

    def _set(self, value):
        self._value_lock.acquire()
        self._value_condition.acquire()
        try:
            old = self.__value
            self.__value = value
            self._value_condition.notifyAll()
        finally:
            self._value_lock.release()
            self._value_condition.release()
        if old != value:
            self.event_generate(ChangeOfValueEvent(self, old, value))

    def get(self, skipCache=0):
        self._value_lock.acquire()
        try:
            value = self.__value
        finally:
            self._value_lock.release()
        return value

    def get_next_value(self, value, timeout=None):
        self._value_lock.acquire()
        try:
            if value != self.__value:
                return self.__value
            self._value_condition.acquire()
        finally:
            self._value_lock.release()
        try:
            self._value_condition.wait(timeout)
            value = self.__value
        finally:
            self._value_condition.release()
        return value

    def is_value_current(self, value):
        self._value_lock.acquire()
        try:
            changed = value != self.__value
        finally:
            self._value_lock.release()
        return changed
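
Schedule.get_next_value() gives callers a long-poll primitive: it returns immediately when the caller's snapshot is already stale, and otherwise blocks on the value Condition until _set() calls notifyAll() or the timeout lapses. A sketch of a consumer loop built on that contract, assuming a started Schedule node from the framework above (the mpx imports and node wiring are not shown):

def follow_value(schedule_node, handle_change, poll_timeout=30.0):
    # Long-poll loop over the class above. A timeout simply hands back the
    # unchanged value, so the loop quietly re-arms itself.
    current = schedule_node.get()
    while True:
        latest = schedule_node.get_next_value(current, poll_timeout)
        if latest != current:
            handle_change(current, latest)
            current = latest

The same shape works for get_next_schedule(), except that the staleness check there is by identity (is), so a client must hold on to the exact schedule object it was last handed.
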
Example No. 11
 class __impl(ImmortalThread):
     tm_counter = Counter(0)
     def __init__(self, timeout=2.0):
         self.timeout = timeout
         self.stations = {}
         self._monitor = monitor.ChannelMonitor(self.timeout)
         self.tm_number = self.tm_counter.increment()
         self._response_tp = ThreadPool(1, 'Jace Response Pool')
         self._pending_responses = Queue()
         self._callbacks = {}
         self._running = False
         self._sync_get_lock = Lock()
         self._last_sync_get = uptime.secs()
         self._cv = Condition()
         ImmortalThread.__init__(self, None, None, 'Jace Transaction Manager')
         return
     def start(self):
         if not self._monitor.is_running():
             self._monitor.start_monitor()
         self._running = True
         self._synchronous_transaction = Transaction(self, None, self._bump_cv)
         self._synchronous_transaction.set_timeout(self.timeout)
         ImmortalThread.start(self)
         return
     def stop(self):
         msglog.log('Jace', INFO, 'Stop Jace Prism Transaction Manager')
         if self._monitor.is_running():
             self._monitor.stop_monitor()
         self._running = False
         return
     def run(self):
         msglog.log('Jace', INFO, 'Starting Jace Prism Transaction Manager.')
         while self._running:
             try:
                 self.send_loop()
                 self.response_loop()
             except:
                 msglog.log('Jace', WARN, 'Jace Transaction Manager - error sending next.')
                 msglog.exception()
         return
     def transaction_completion_handler(self, transaction):
         self.tm_number = self.tm_counter.increment()
         try:
             tid = transaction.tid
             s_id, callback = self._callbacks.get(tid)
             if callback:
                 del self._callbacks[tid]
                 self._pending_responses.put((callback, transaction.get_response()))
         except:
             msglog.exception()
         # recycle the transaction for reuse within the queue
         self.stations.get(s_id).put_transaction(transaction)
         return
     def add_station(self, station):
         s_id = station.get_id()
         self.stations[s_id] = station
         return
     def get_synchronous(self, station, rqst):
         self._sync_get_lock.acquire()
         try:
             t = self._synchronous_transaction
             hdr = self._get_auth_header(station)
             hdr['Connection'] = 'close'
             t.build_request(rqst.url, None, hdr)
             self._cv.acquire()
             try:
                 response = ETimeout()
                 try:
                     t.send_request()
                     self._cv.wait(self.timeout)
                     self._last_sync_get = uptime.secs()
                     if t.is_expired():
                         t.cancel()
                     else:
                         response = t.get_response()
                 except:
                     t.cancel()
             finally:
                 self._cv.release()
             return response
         finally:
             self._sync_get_lock.release()
         return
     def _bump_cv(self, transaction):
         # transaction isn't used
         self._cv.acquire()
         self._cv.notify()
         self._cv.release()
         return
     def send_loop(self):
         for s_id, station in self.stations.items():
             for i in range(station.transaction_limit):
                 try:
                     t, rqst = station.get_next()
                 except Empty:
                     break
                 hdr = self._get_auth_header(station)
                 hdr['Connection'] = 'close'
                 t.build_request(rqst.url, None, hdr)
                 self._callbacks[t.tid] = (s_id, rqst.callback)
                 t.send_request()
         return
     def response_loop(self):
         while 1:
             try:
                 callback, rsp = self._pending_responses.get(False)
                 callback(rsp)
             except Empty:
                 return
             except:
                 msglog.log('Jace', WARN, 'Unexpected error in response_loop')
                 msglog.exception()
         return
     def _get_auth_header(self, station):
         return {"Authorization":
                 "Basic %s" % station.base64string}
Example No. 12
 def __init__(self):
     self._lock = Lock()
     self._cond = Condition(self._lock)
     self.state = ConnectionMixin.IDLE
     self.connection_count = 0
Example No. 13
    class __impl(ImmortalThread):
        tm_counter = Counter(0)

        def __init__(self, timeout=2.0):
            self.timeout = timeout
            self.stations = {}
            self._monitor = monitor.ChannelMonitor(self.timeout)
            self.tm_number = self.tm_counter.increment()
            self._response_tp = ThreadPool(1, 'Jace Response Pool')
            self._pending_responses = Queue()
            self._callbacks = {}
            self._running = False
            self._sync_get_lock = Lock()
            self._last_sync_get = uptime.secs()
            self._cv = Condition()
            ImmortalThread.__init__(self, None, None,
                                    'Jace Transaction Manager')
            return

        def start(self):
            if not self._monitor.is_running():
                self._monitor.start_monitor()
            self._running = True
            self._synchronous_transaction = Transaction(
                self, None, self._bump_cv)
            self._synchronous_transaction.set_timeout(self.timeout)
            ImmortalThread.start(self)
            return

        def stop(self):
            msglog.log('Jace', INFO, 'Stop Jace Prism Transaction Manager')
            if self._monitor.is_running():
                self._monitor.stop_monitor()
            self._running = False
            return

        def run(self):
            msglog.log('Jace', INFO, 'Starting Jace Prism Transaction Manager.')
            while self._running:
                try:
                    self.send_loop()
                    self.response_loop()
                except:
                    msglog.log(
                        'Jace', WARN,
                        'Jace Transaction Manager - error sending next.')
                    msglog.exception()
            return

        def transaction_completion_handler(self, transaction):
            self.tm_number = self.tm_counter.increment()
            try:
                tid = transaction.tid
                s_id, callback = self._callbacks.get(tid)
                if callback:
                    del self._callbacks[tid]
                    self._pending_responses.put(
                        (callback, transaction.get_response()))
            except:
                msglog.exception()
            # recycle the transaction for reuse within the queue
            self.stations.get(s_id).put_transaction(transaction)
            return

        def add_station(self, station):
            s_id = station.get_id()
            self.stations[s_id] = station
            return

        def get_synchronous(self, station, rqst):
            self._sync_get_lock.acquire()
            try:
                t = self._synchronous_transaction
                hdr = self._get_auth_header(station)
                hdr['Connection'] = 'close'
                t.build_request(rqst.url, None, hdr)
                self._cv.acquire()
                try:
                    response = ETimeout()
                    try:
                        t.send_request()
                        self._cv.wait(self.timeout)
                        self._last_sync_get = uptime.secs()
                        if t.is_expired():
                            t.cancel()
                        else:
                            response = t.get_response()
                    except:
                        t.cancel()
                finally:
                    self._cv.release()
                return response
            finally:
                self._sync_get_lock.release()
            return

        def _bump_cv(self, transaction):
            # transaction isn't used
            self._cv.acquire()
            self._cv.notify()
            self._cv.release()
            return

        def send_loop(self):
            for s_id, station in self.stations.items():
                for i in range(station.transaction_limit):
                    try:
                        t, rqst = station.get_next()
                    except Empty:
                        break
                    hdr = self._get_auth_header(station)
                    hdr['Connection'] = 'close'
                    t.build_request(rqst.url, None, hdr)
                    self._callbacks[t.tid] = (s_id, rqst.callback)
                    t.send_request()
            return

        def response_loop(self):
            while 1:
                try:
                    callback, rsp = self._pending_responses.get(False)
                    callback(rsp)
                except Empty:
                    return
                except:
                    msglog.log('Jace', WARN,
                               'Unexpected error in response_loop')
                    msglog.exception()
            return

        def _get_auth_header(self, station):
            return {"Authorization": "Basic %s" % station.base64string}
Example No. 14
class LightPoint(CompositeNode, EventProducerMixin):
    def __init__(self):
        self.__group = None
        self._value = None
        self._old_value = None
        self._last_update = 0
        self.__cv = Condition()
        super(LightPoint, self).__init__()
        EventProducerMixin.__init__(self)
        
    def configure(self, cd):
        super(LightPoint, self).configure(cd)
        set_attribute(self, 'lightpoint_id', REQUIRED, cd, int)
        set_attribute(self, 'timeout', 2, cd, int)
        #relay number 5 actuates lights
        set_attribute(self, 'relay_num', 5, cd, int)
        
    def configuration(self):
        cd = super(LightPoint, self).configuration()
        get_attribute(self, 'lightpoint_id', cd)
        get_attribute(self, 'timeout', cd)
        get_attribute(self, 'relay_num', cd)
        return cd
        
    def start(self):
        self.parent.parent.register(self)
        super(LightPoint, self).start()
        
    def get(self, skipCache=0):
        rslt = self._value
        # cache miss
        if (uptime.secs() - self._last_update) > self.group.ttl:
            # motes periodically push updates - if it's been silent for 
            # too long force an update.  @fixme: the first read will still
            # return stale data - add blocking call and ETimeout logic
            last = self._last_update
            self.__cv.acquire()
            try:
                try:
                    self._force_update()
                except:
                    # an error using the XCommand interface occurred
                    # raise an exception but do not cache the ETimeout
                    msglog.exception()
                    raise ETimeout()
                self.__cv.wait(self.timeout)
                if last != self._last_update:
                    # an update has occurred
                    rslt = self._value
                else:
                    self._last_update = uptime.secs()
                    # let ETimeouts affect our value caching as well,
                    # if a better update comes from the mesh, great.
                    rslt = self._value = ETimeout()
            finally:
                self.__cv.release()
        if isinstance(rslt, ETimeout):
            raise rslt
        return rslt
        
    def set(self, value):
        value = int(value)
        if value < 0 or value > 1:
            raise EInvalidValue()
        self.group.server.actuate(self.lightpoint_id, self.relay_num, value)
        
    def has_cov(self):
        return 1
        
    def event_subscribe(self, *args):
        super(LightPoint, self).event_subscribe(self, *args)
        self._old_value = self.get()
        # generate initial event
        self.event_generate(ChangeOfValueEvent(self, self._old_value, 
                            self._old_value, time.time()))
        
    def _force_update(self):
        ACTION_NONE = 2
        self.group.server.actuate(self.lightpoint_id, self.relay_num, ACTION_NONE)
        
    def update(self, data):
        self.__cv.acquire()
        try:
            self._old_value = self._value
            self._value = data.get('relayState1')
            self._last_update = uptime.secs()
            self.__cv.notifyAll()
            self.event_generate(ChangeOfValueEvent(self, self._old_value, 
                                self._value, time.time()))
        finally:
            self.__cv.release()
        
    def __get_group(self):
        if self.__group is None:
            self.__group = self.parent
        return self.__group
        
    group = property(__get_group)
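
The interesting piece of LightPoint is the cache-refresh handshake between get() and update(): the reader forces a refresh and then sleeps on the Condition until the push side stores a new value and calls notifyAll(), or the timeout lapses. A stripped-down, framework-free sketch of that handshake, with a hypothetical request_refresh() standing in for the XCommand actuate call:

from threading import Condition
import time

class CachedPoint:
    def __init__(self, request_refresh, ttl=30.0, timeout=2.0):
        self._cv = Condition()
        self._request_refresh = request_refresh   # asks the device for a fresh push
        self._ttl = ttl
        self._timeout = timeout
        self._value = None
        self._last_update = 0.0

    def update(self, value):
        # Push side (device callback): store the value and wake any readers.
        self._cv.acquire()
        try:
            self._value = value
            self._last_update = time.time()
            self._cv.notify_all()
        finally:
            self._cv.release()

    def get(self):
        # Read side: serve the cache while it is fresh, otherwise block
        # until the next push arrives or the timeout expires.
        if time.time() - self._last_update <= self._ttl:
            return self._value
        last = self._last_update
        self._cv.acquire()
        try:
            self._request_refresh()
            self._cv.wait(self._timeout)
            if last == self._last_update:
                raise RuntimeError('device did not answer within the timeout')
            return self._value
        finally:
            self._cv.release()

The original goes one step further on timeout: it caches the ETimeout instance as the value itself, so repeated readers do not hammer the mesh until a real update arrives.
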
Example No. 15
class Schedule(CompositeNode, EventProducerMixin):
    def __init__(self):
        CompositeNode.__init__(self)
        EventProducerMixin.__init__(self)
        self._schedule_lock = Lock()
        self._schedule_condition = Condition()
        self._value_lock = Lock()
        self._value_condition = Condition()
        self.__schedule = None
        self.__value = None

    def configure(self, config):
        CompositeNode.configure(self, config)

    def configuration(self):
        config = CompositeNode.configuration(self)
        return config

    def start(self):
        CompositeNode.start(self)

    def stop(self):
        CompositeNode.stop(self)

    def set_schedule(self, client, schedule):
        self._schedule_lock.acquire()
        self._schedule_condition.acquire()
        try:
            self.__schedule = schedule
            self._schedule_condition.notifyAll()
        finally:
            self._schedule_lock.release()
            self._schedule_condition.release()
        self.event_generate(ScheduleChangedEvent(client, schedule))

    def get_schedule(self):
        self._schedule_lock.acquire()
        try:
            schedule = self.__schedule
        finally:
            self._schedule_lock.release()
        if isinstance(schedule, Exception):
            raise schedule
        return schedule

    ##
    # @param schedule Schedule client believes to be current.
    def get_next_schedule(self, schedule, timeout=None):
        self._schedule_lock.acquire()
        try:
            if schedule is not self.__schedule:
                return self.__schedule
            self._schedule_condition.acquire()
        finally:
            self._schedule_lock.release()
        try:
            self._schedule_condition.wait(timeout)
            schedule = self.__schedule
        finally:
            self._schedule_condition.release()
        if isinstance(schedule, Exception):
            raise schedule
        return schedule

    def is_schedule_current(self, schedule):
        self._schedule_lock.acquire()
        try:
            changed = schedule is not self.__schedule
        finally:
            self._schedule_lock.release()
        return changed

    def _set(self, value):
        self._value_lock.acquire()
        self._value_condition.acquire()
        try:
            old = self.__value
            self.__value = value
            self._value_condition.notifyAll()
        finally:
            self._value_lock.release()
            self._value_condition.release()
        if old != value:
            self.event_generate(ChangeOfValueEvent(self, old, value))

    def get(self, skipCache=0):
        self._value_lock.acquire()
        try:
            value = self.__value
        finally:
            self._value_lock.release()
        return value

    def get_next_value(self, value, timeout=None):
        self._value_lock.acquire()
        try:
            if value != self.__value:
                return self.__value
            self._value_condition.acquire()
        finally:
            self._value_lock.release()
        try:
            self._value_condition.wait(timeout)
            value = self.__value
        finally:
            self._value_condition.release()
        return value

    def is_value_current(self, value):
        self._value_lock.acquire()
        try:
            changed = value != self.__value
        finally:
            self._value_lock.release()
        return changed