def _tick(self):
    """Execute every entry that is due, reschedule recurring/premature
    entries, and return the number of seconds until the next run time
    (or None when no entries remain).

    Caller must hold self._condition; the lock is released while entries
    execute so callbacks cannot deadlock against the scheduler.
    """
    reschedule = []
    # Pull everything scheduled at-or-before "now" (uptime seconds).
    entries = self._entries.popentries(uptime.secs())
    self._condition.release()
    try:
        for entry in entries:
            try:
                entry.execute()
            except EActionExpired:
                # Entry outlived its window; silently drop it.
                pass
            except EActionPremature:
                message = 'entry %s execution premature, will reschedule'
                self.debugout(message, STANDARDDEBUG, entry)
                reschedule.append(entry)
            except:
                # Never let a callback error kill the scheduler loop.
                msglog.exception()
            else:
                # Successful run: recurring entries go back on the queue.
                if isinstance(entry, RecurringEntry):
                    reschedule.append(entry)
    finally:
        # Reacquire before touching shared state again.
        self._condition.acquire()
    for entry in reschedule:
        entry.computewhen()
    self._entries.addentries(reschedule)
    nextrun = self._entries.nextruntime()
    if nextrun is not None:
        # Convert absolute uptime to a non-negative relative delay.
        nextrun = max(0, nextrun - uptime.secs())
    return nextrun
def _test_a(self):
    """Verify that _uptime.secs() advances in step with wall-clock time."""
    before = _uptime.secs()
    time.sleep(1)
    after = _uptime.secs()
    diff = after - before
    assert abs(1 - diff) < .01, "Difference should be very close to 1, was %f" % diff
    assert diff > 0, "Difference should be positive"
def get(self, skipCache=0): rslt = self._value # cache miss if (uptime.secs() - self._last_update) > self.group.ttl: # motes periodically push updates - if it's been silent for # too long force an update. @fixme: the first read will still # return stale data - add blocking call and ETimeout logic last = self._last_update self.__cv.acquire() try: try: self._force_update() except: # an error using the XCommand interface occured # raise an exception but do not cache the ETimeout msglog.exception() raise ETimeout() self.__cv.wait(self.timeout) if last != self._last_update: # an update has occurred rslt = self._value else: self._last_update = uptime.secs() # let ETimeouts affect our value caching as well, # if a better update comes from the mesh, great. rslt = self._value = ETimeout() finally: self.__cv.release() if isinstance(rslt, ETimeout): raise rslt return rslt
def goto(self, key_path):
    """Send the configured key sequence to the device and wait until every
    child Point node has absorbed a value from the resulting screen.

    NOTE(review): the key_path parameter is ignored; self.key_path is used
    instead -- confirm whether that is intentional.
    """
    #send keys to destination pages & absorb all values
    path = self.key_path
    path = path.lower()
    keys = path.split( ' ' ) #turn string into list of keywords - pun intended - that will be sent to device
    self.set_active_screen(self) #allow changes to this screen
    self.screen = None #after final key, clear the screen object to get a fresh decode
    eos = 1 #default to wait to make sure no more data
    for k in keys:
        eos = self._send_key(k)
    #self.screen = None #after final key, clear the screen object to get a fresh list of texts decoded
    if self.debug > 1:
        print 'start looking for values from screen'
    #now wait for string that indicated we have read in the page.
    points = filter(lambda c: isinstance(c, Point), self.children_nodes())
    point_count = len(points)
    then = uptime.secs()
    if self.debug > 1:
        print 'remaining points: ', [p.name for p in points]
    while points: #keep polling until all points have been seen at least once
        #filter out points that have received values since clearning screen
        points = filter(lambda p: p._get(self.screen) is None, points)
        if self.debug > 1 and len(points):
            print 'remaining points: ', [p.name for p in points]
        if uptime.secs() > (then + self.timeout):
            if self.debug:
                print CSI_RED + 'Timedout waiting for screen values' + CSI_Reset
                if len(points):
                    print 'remaining points: ', [p.name for p in points]
            break #timeout has occured
        self.poll_for_incomming_packets()
    self.points_screen = self.screen #make new values available to point nodes
    if eos: #true if "screen complete" was received so _send_key did not wait around for new elements to stop showing up
        eop = 0
        for i in range( 20): #read in rest of screen for getting points list
            old_count = len(self.screen.texts)
            self.poll_for_incomming_packets()
            if self.screen_complete:
                break #this flag is reset when a zero length update is recevied
            if old_count == len(self.screen.texts):
                if eop:
                    break #no new points displayed for 2nd time, we are done.
                eop = 1
    else:
        if self.debug:
            print CSI_RED + 'Screen did not complete for value page' + CSI_Reset
    self.last_screen = self.screen #if any new values came since nodes were satisfied, make available for texts() command
    self.set_active_screen(None) #block any further changes to this screen
    if self.debug:
        if points:
            print CSI_RED + 'CPC timeout on screen', self.as_node_url(),
            for p in points:
                print p.name,
            print ' were all not read' + CSI_Reset
        else:
            print CSI_GREEN + 'CPC completed ' + str( point_count) + ' points for screen:', self.as_node_url( ), CSI_Reset
def wait_for_response(self):
    """Poll for an incoming packet until one arrives or self.timeout
    seconds (of uptime) elapse; return the packet or None on timeout."""
    deadline = uptime.secs() + self.timeout
    answer = self.poll_for_incomming_packets()
    while answer is None and uptime.secs() <= deadline:
        answer = self.poll_for_incomming_packets()
    return answer
def _wait_for_skew_detection(self, scheduler, timerep, timeout):
    """Hook timerep's notification into the scheduler's checktime callback,
    block until a clock skew is detected (or timeout expires), then unhook.
    """
    if debug:
        print '%f: Entering _wait_for_skew_detection' % uptime.secs()
    scheduler.set_checktime_callback(timerep.notify_detected)
    timerep.await_detection(timeout)
    # Always clear the callback so later ticks don't re-notify.
    scheduler.set_checktime_callback(None)
    if debug:
        print '%f: Returning from _wait_for_skew_detection' % uptime.secs()
def _test_a(self):
    """Check that consecutive _uptime.secs() readings track real time."""
    start = _uptime.secs()
    time.sleep(1)
    stop = _uptime.secs()
    elapsed = stop - start
    assert abs( 1 - elapsed) < .01, "Difference should be very close to 1, was %f" % elapsed
    assert elapsed > 0, "Difference should be positive"
def _waitupto(self, timeout, waitlock):
    """Poll for readable data until `waitlock` is released or `timeout`
    seconds elapse.

    Returns True when the lock was released, False when the timeout
    expired first.
    """
    startuptime = uptime.secs()
    while not timeout < 0:
        # Bound each poll to MSECSMAX milliseconds.
        msecs = min(timeout * 1000, self.MSECSMAX)
        self._pollreadable(msecs)
        if waitlock.locked():
            curuptime = uptime.secs()
            # BUGFIX: charge only the time elapsed since the previous
            # iteration.  The original subtracted the *total* elapsed
            # time since the first pass on every iteration, so the
            # remaining timeout shrank far too quickly on multi-pass waits.
            lapsetime = curuptime - startuptime
            startuptime = curuptime
            timeout = timeout - lapsetime
        else:
            # Lock released: wait succeeded.
            break
    else:
        # Loop condition failed: timeout went negative before release.
        return False
    return True
def goto(self, key_path):
    """Drive the device UI to self.key_path and block until every child
    Point node has read a value from the resulting screen (or timeout).

    NOTE(review): key_path argument is unused; self.key_path is read
    instead -- confirm intentional.
    """
    #send keys to destination pages & absorb all values
    path = self.key_path
    path = path.lower()
    keys = path.split(' ') #turn string into list of keywords - pun intended - that will be sent to device
    self.set_active_screen(self) #allow changes to this screen
    self.screen = None #after final key, clear the screen object to get a fresh decode
    eos = 1 #default to wait to make sure no more data
    for k in keys:
        eos = self._send_key(k)
    #self.screen = None #after final key, clear the screen object to get a fresh list of texts decoded
    if self.debug > 1:
        print 'start looking for values from screen'
    #now wait for string that indicated we have read in the page.
    points = filter(lambda c: isinstance(c, Point), self.children_nodes())
    point_count = len(points)
    then = uptime.secs()
    if self.debug > 1:
        print 'remaining points: ', [p.name for p in points]
    while points: #keep polling until all points have been seen at least once
        #filter out points that have received values since clearning screen
        points = filter(lambda p: p._get(self.screen) is None, points)
        if self.debug > 1 and len(points):
            print 'remaining points: ', [p.name for p in points]
        if uptime.secs() > (then + self.timeout):
            if self.debug:
                print CSI_RED+'Timedout waiting for screen values'+CSI_Reset
                if len(points):
                    print 'remaining points: ', [p.name for p in points]
            break #timeout has occured
        self.poll_for_incomming_packets()
    self.points_screen = self.screen #make new values available to point nodes
    if eos: #true if "screen complete" was received so _send_key did not wait around for new elements to stop showing up
        eop = 0
        for i in range(20): #read in rest of screen for getting points list
            old_count = len(self.screen.texts)
            self.poll_for_incomming_packets()
            if self.screen_complete:
                break #this flag is reset when a zero length update is recevied
            if old_count == len(self.screen.texts):
                if eop:
                    break #no new points displayed for 2nd time, we are done.
                eop = 1
    else:
        if self.debug:
            print CSI_RED+'Screen did not complete for value page'+CSI_Reset
    self.last_screen = self.screen #if any new values came since nodes were satisfied, make available for texts() command
    self.set_active_screen(None) #block any further changes to this screen
    if self.debug:
        if points:
            print CSI_RED+'CPC timeout on screen', self.as_node_url(),
            for p in points:
                print p.name,
            print ' were all not read'+CSI_Reset
        else:
            print CSI_GREEN+'CPC completed '+str(point_count)+' points for screen:', self.as_node_url(), CSI_Reset
def callback(self, *args): #print len(args) if len(args) == 2: #append something to the passed in args args[1].append(args[0]) if debug: print '%f: In callback with %s.' % (uptime.secs(), str(args))
def _test_cases(self):
    """Micro-benchmark: time 200 RNA XML-RPC 'get' calls against the
    /services/time node and print the total elapsed seconds."""
    # if self.case == 100:
    nodeurl = '/services/time'
    num_iters = 200
    st = up.secs()
    for i in range(0, num_iters):
        rsp = self.server.rna_xmlrpc2.invoke(self.session, nodeurl,'get', )
    end = up.secs()
    print '%d gets took %f seconds.' % (num_iters,end-st)
def debug_output(self, message=None, location=None): """ Use preferred 'debugout' method instead. """ if self._debug: if message: print '%f: SCHEDULER: %s' % (uptime.secs(), message)
def get_synchronous(self, station, rqst):
    """Issue a blocking request against `station` on the shared synchronous
    transaction and return the response, or an ETimeout instance when the
    transaction expires or errors.

    Serialized by _sync_get_lock; the condition variable is notified by
    the response-handling path.
    """
    self._sync_get_lock.acquire()
    try:
        t = self._synchronous_transaction
        hdr = self._get_auth_header(station)
        hdr['Connection'] = 'close'
        t.build_request(rqst.url, None, hdr)
        self._cv.acquire()
        try:
            # Default to timeout unless a real response arrives below.
            response = ETimeout()
            try:
                t.send_request()
                self._cv.wait(self.timeout)
                self._last_sync_get = uptime.secs()
                if t.is_expired():
                    t.cancel()
                else:
                    response = t.get_response()
            except:
                # Any send/receive failure: cancel so the transaction
                # can be reused; the ETimeout default is returned.
                t.cancel()
            finally:
                self._cv.release()
            return response
        finally:
            self._sync_get_lock.release()
    # Unreachable: retained from the original source.
    return
def get_next(self):
    """Pop an idle transaction and the next expired request; return the
    (transaction, request) pair.

    Raises Empty when no transaction is available (after one recycle
    attempt) or when no request needs service; in the latter case the
    transaction is returned to its pool first.
    """
    try:
        transaction = self._transaction_q.get(False)
        self._backed_up = False
    except Empty:
        if self._backed_up:
            # Second consecutive miss: reclaim expired transactions
            # and try once more.
            self.recycle_expired_transactions()
            transaction = self._transaction_q.get(False)
        else:
            self._backed_up = True
            raise
    now = uptime.secs()
    fresh = []
    while 1:
        try:
            rqst = self._request_q.get(False)
            # NOTE(review): a request whose age exceeds its ttl is the
            # one *selected* for service here -- confirm "stale means
            # due for refresh" is the intended semantics.
            if (now - rqst.last_update) > rqst.ttl:
                break
            fresh.append(rqst)
        except Empty:
            rqst = None
            break
    # Put still-fresh requests back for later.
    map(self._request_q.put, fresh)
    if rqst is None:
        self.put_transaction(transaction)
        raise Empty
    return (transaction, rqst)
def update_cache(self, value_obj):
    """Fan each subscriber its named value from value_obj, then record the
    value object and its receipt time (uptime seconds)."""
    for name, func in self._subscribers.items():
        func(value_obj.get(name))
    self._last_value = value_obj
    self._last_rcvd = uptime.secs()
    return
def __init__(self):
    """Initialize node bookkeeping, then the node/event-producer bases."""
    # Data-source bookkeeping.
    self.source = None
    self.result = Undefined
    self.updated = 0
    self.support_cov = False
    self.created = uptime.secs()
    # Guards concurrent access to the result state.
    self.synclock = RLock()
    CompositeNode.__init__(self)
    EventProducerMixin.__init__(self)
def test_acquire_timeout(self):
    """Verify lock acquisition times out close to each configured timeout.

    Re-acquires an already-held lock for several timeout settings and
    checks the measured wait is within 0.2s of the setting.
    """
    _dlocks.approach = 2
    l = allocate2()
    l.acquire()
    for i in (0.2, 1, 2, 5, 10):
        set_lock_attributes(l, i, "Test Lock #1")
        st_time = uptime.secs()
        try:
            l.acquire()
        except:
            # The timed-out acquire is expected to raise.
            pass
        en_time = uptime.secs()
        elapsed_time = en_time - st_time
        if abs(elapsed_time - i) > 0.2:
            mstr = "Did not timeout in the specified, time "
            mstr += "(%f seconds). " % i
            mstr += "Instead got %f seconds." % elapsed_time
            # BUGFIX: `raise mstr` raised a bare string, which is a
            # TypeError on Python >= 2.6; raise a real exception instead.
            raise AssertionError(mstr)
def test_acquire_timeout(self):
    """Check that acquiring a held lock fails within ~0.2s of each
    configured timeout value."""
    _dlocks.approach = 2
    l = allocate2()
    l.acquire()
    for i in (.2, 1, 2, 5, 10):
        set_lock_attributes(l, i, 'Test Lock #1')
        st_time = uptime.secs()
        try:
            l.acquire()
        except:
            # Expected: the timed acquire raises on timeout.
            pass
        en_time = uptime.secs()
        elapsed_time = en_time - st_time
        if abs(elapsed_time - i) > .2:
            mstr = 'Did not timeout in the specified, time '
            mstr += '(%f seconds). ' % i
            mstr += 'Instead got %f seconds.' % elapsed_time
            # BUGFIX: raising a bare string (`raise mstr`) is invalid on
            # Python >= 2.6; wrap the message in AssertionError.
            raise AssertionError(mstr)
def __init__(self, qid, iterator, **kw):
    """Create a query with its id, a result iterator, and optional
    'timeout' (default 300s) and 'count' (default 1000) keywords."""
    self.qid = qid
    self.iterator = iterator
    # Progress tracking.
    self.complete = False
    self.returned = Counter()
    # Tunables supplied by keyword.
    self.timeout = kw.get("timeout", 300)
    self.default_count = kw.get("count", 1000)
    self.created = self.touched = uptime.secs()
    super(Query, self).__init__()
def get(self, block=True, timeout=None):
    """Queue.get with block/timeout semantics, emulated for legacy
    (Python 2.2) _Queue implementations that lack timeout support.

    Raises Queue.Empty (via the final non-blocking get) when no item is
    available in time.
    """
    if not self.__legacy:
        return _Queue.get(self, block, timeout)
    # 2.2 compatibility path below.
    if block and timeout is None:
        # Block indefinitely.  BUGFIX: the original called
        # _Queue.get(self) without returning its result, discarding the
        # dequeued item and then dequeuing a *second* item (or raising
        # Empty) via the non-blocking get at the bottom.
        return _Queue.get(self)
    if timeout:
        # Spin until an item appears or the deadline passes.
        # NOTE(review): this busy-waits without sleeping -- confirm
        # whether a short sleep per pass is acceptable here.
        endtime = uptime.secs() + timeout
        while not self.qsize():
            remaining = endtime - uptime.secs()
            if remaining <= 0.0:
                break
    ##
    # we've either timed out or data is available.
    # either immediately return the data or raise Queue.Empty
    return _Queue.get(self, False)
def send_request(self):
    """Attach a fresh channel to this transaction, connect it to the
    request's target, send the request, and stamp the send time."""
    chan = Channel(self.tm._monitor)
    self.set_channel(chan)
    chan.socket = None
    rqst = self.request
    chan.setup_connection(rqst.get_host(), rqst.get_port(), rqst.get_type())
    chan.send_request(rqst)
    self.send_time = uptime.secs()
    return
def trim_expired(self):
    """Trim expired clients and events under the sync lock; stamp the trim
    time and return how many items were removed in total."""
    self.synclock.acquire()
    try:
        removed = len(self._trim_clients())
        removed += len(self._trim_events())
        self._last_trimmed = uptime.secs()
    finally:
        self.synclock.release()
    return removed
def update(self, data):
    """Record a new relayState1 sample under the condition variable, wake
    any waiters, and emit a change-of-value event."""
    self.__cv.acquire()
    try:
        previous = self._value
        current = data.get('relayState1')
        self._old_value = previous
        self._value = current
        self._last_update = uptime.secs()
        # Wake readers blocked in get().
        self.__cv.notifyAll()
        self.event_generate(ChangeOfValueEvent(self, previous, current, time.time()))
    finally:
        self.__cv.release()
def update_value(self, value):
    """Cache `value` when it differs from the current result; fire COV and
    return True on change, False when the value is unchanged."""
    cached = self._cached_result
    if cached:
        if value == cached.value:
            return False
        previous = cached.value
        changes = cached.changes + 1
    else:
        previous = None
        changes = 1
    self._cached_result = Result(value, uptime.secs(), 1, changes)
    self._trigger_cov(previous, value, time.time())
    return True
def send_request(self):
    """Build and connect a new monitor channel for this transaction's
    request, dispatch it, and record when it was sent."""
    request = self.request
    channel = Channel(self.tm._monitor)
    self.set_channel(channel)
    channel.socket = None
    channel.setup_connection(
        request.get_host(), request.get_port(), request.get_type())
    channel.send_request(request)
    self.send_time = uptime.secs()
    return
def acquire(self, blocking=1):
    """Acquire the wrapped lock, raising _LockAssertion if it cannot be
    obtained within self.timeout seconds.

    Non-blocking calls delegate straight to the real lock.
    """
    if not blocking:
        return self.real_lock.acquire(blocking)
    start = uptime.secs()
    while True:
        got_it = self.real_lock.acquire(0)
        if got_it:
            # Record the holder for deadlock diagnostics.
            self.locker = currentThread()
            return got_it
        if uptime.secs() - start > self.timeout:
            break
        time.sleep(.1)
    # Timed out without acquiring; report the current holder.
    mstr = ("Could not acquire lock (%s) within %d seconds! "
            "Locker is %s.") % (str( self.name), self.timeout, str(self.locker))
    raise _LockAssertion(mstr)
def __init__(self):
    """Initialize empty query-client state, then the base class."""
    # Result bookkeeping.
    self.fields = ()
    self.values = []
    self.seen = set()
    self.itemmap = {}
    # Pagination window (unset).
    self.start = -1
    self.stop = -1
    # Query parameters (unset).
    self.query = None
    self.reversed = None
    self.query_to_time = None
    self.query_from_time = None
    self.created = self.touched = uptime.secs()
    super(QueryClient, self).__init__()
def update(self, msg):
    """Stamp receipt time, cache the value (or exception) carried by msg,
    and trigger change-of-value against the prior cached value."""
    self._last_rcvd = uptime.secs()
    cached = self._cached_result
    if cached is None:
        last_value = None
        change_count = 1
    else:
        last_value = cached.value
        change_count = cached.changes + 1
    if isinstance(msg, Exception):
        # Cache errors as values so readers see the failure.
        value = msg
    else:
        value = getattr(msg, self.prop_name)
    self._cached_result = Result(value, time.time(), changes=change_count)
    self._trigger_cov(last_value)
def __init__(self, timeout=2.0):
    """Set up Jace transaction-manager state and the immortal thread base."""
    self.timeout = timeout
    self.stations = {}
    # Channel monitor shares the manager's timeout.
    self._monitor = monitor.ChannelMonitor(self.timeout)
    self.tm_number = self.tm_counter.increment()
    # Single-threaded pool for response callbacks.
    self._response_tp = ThreadPool(1, 'Jace Response Pool')
    self._pending_responses = Queue()
    self._callbacks = {}
    self._running = False
    # Synchronous-get serialization and signalling.
    self._sync_get_lock = Lock()
    self._last_sync_get = uptime.secs()
    self._cv = Condition()
    ImmortalThread.__init__(self, None, None, 'Jace Transaction Manager')
    return
def acquire(self, blocking=1):
    """Acquire the underlying lock within self.timeout seconds or raise
    _LockAssertion naming the current holder.

    A non-blocking call is passed through to the real lock unchanged.
    """
    if not blocking:
        return self.real_lock.acquire(blocking)
    began = uptime.secs()
    while 1:
        outcome = self.real_lock.acquire(0)
        if outcome:
            # Note who owns the lock, for the failure message below.
            self.locker = currentThread()
            return outcome
        if uptime.secs() - began > self.timeout:
            break
        time.sleep(.1)
    # Could not acquire in time -- raise with diagnostic detail.
    mstr = ("Could not acquire lock (%s) within %d seconds! "
            "Locker is %s.") % (
        str(self.name), self.timeout, str(self.locker))
    raise _LockAssertion(mstr)
def update(self, msg):
    """Record when msg arrived, cache its value (exceptions are cached
    as-is), and fire COV with the previously cached value."""
    self._last_rcvd = uptime.secs()
    previous = self._cached_result
    if previous is None:
        last_value, change_count = None, 1
    else:
        last_value, change_count = previous.value, previous.changes + 1
    if isinstance(msg, Exception):
        value = msg
    else:
        # Extract the property this node tracks.
        value = getattr(msg, self.prop_name)
    self._cached_result = Result(value, time.time(), changes=change_count)
    self._trigger_cov(last_value)
def event_subscribe(self, *args):
    """Register an event subscriber, wiring this node into its parent's
    bundled subscription (or starting continuous updates on the first
    subscriber), and replay a still-fresh cached value as an initial COV.
    """
    self._subscription_lock.acquire()
    try:
        already_subscribed = self.event_has_subscribers()
        EventProducerMixin.event_subscribe(self, *args)
        if self.parent.can_bundle() and self.bundle:
            # Parent polls in bulk; have it feed our cache.
            self.parent.subscribe(self.prop_name, self.update_cache)
        elif not already_subscribed:
            # First subscriber: kick off the continuous update loop.
            self.update_continuous(None)
        # Replay the cached value if it is younger than the TTL.
        if self._cached_result and \
           (uptime.secs() - self._cached_result.timestamp) < self.ttl:
            self._trigger_cov(self._cached_result.value,
                              self._cached_result.value, time.time())
    finally:
        self._subscription_lock.release()
    return
def update_continuous(self, rsp):
    """Continuous-update callback: decode a completed response into the
    cache and re-queue the request while subscribers remain.

    Called with rsp=None to start the loop (registers itself as the
    request callback).
    """
    value = None
    if rsp is not None:
        if not isinstance(rsp, Exception):
            if not rsp.is_complete():
                rsp.await_completion(self.station.timeout)
            rsp = rsp.read()
            value = self.decode(rsp)
        # Exceptions leave value=None, caching an "unknown" marker.
        self.update_cache(ValueObj(value))
        if not self.event_has_subscribers():
            # don't keep on, keepin on.
            return
        self._rqst.set_last_update(uptime.secs())
    else:
        # Initial call: arrange to be invoked with each response.
        self._rqst.set_callback(self.update_continuous)
    self.station.add_request(self._rqst)
    return
def event_subscribe(self, *args):
    """Add an event subscriber; route updates through the parent's bundle
    when possible, otherwise start continuous updates on the first
    subscriber, and replay a fresh cached value immediately.
    """
    self._subscription_lock.acquire()
    try:
        already_subscribed = self.event_has_subscribers()
        EventProducerMixin.event_subscribe(self, *args)
        if self.parent.can_bundle() and self.bundle:
            # Parent performs bundled polling; feed our cache from it.
            self.parent.subscribe(self.prop_name, self.update_cache)
        elif not already_subscribed:
            # First subscriber starts the update loop.
            self.update_continuous(None)
        # Deliver the cached value at once if still within TTL.
        if self._cached_result and \
           (uptime.secs() - self._cached_result.timestamp) < self.ttl:
            self._trigger_cov(
                self._cached_result.value,
                self._cached_result.value,
                time.time()
            )
    finally:
        self._subscription_lock.release()
    return
def trim_expired_caches(self):
    """Drop closed-event records older than closed_event_ttl and caches
    untouched longer than cache_ttl; return the removed cache IDs.

    Caller must already hold cache_lock.  Deleting while iterating
    .items() is safe here only because Python 2's .items() returns a
    list snapshot.
    """
    if not self.cache_lock.locked():
        raise Exception('Must be locked to trim caches.')
    removed = []
    now = uptime.secs()
    # The None key holds the master item map.
    allitems = self.caches[None]
    for guid,closed in self.close_events.items():
        if (now - closed) > self.closed_event_ttl:
            if guid in allitems:
                del(allitems[guid])
            del(self.close_events[guid])
    for cid,cache in self.caches.items():
        # cid None (the master map) is never trimmed.
        if cid and (cache.since_touched() > self.cache_ttl):
            del(self.caches[cid])
            removed.append(cid)
    if self.debug and removed:
        print 'Cache trim trimmed the following IDs: %s.' % (removed,)
    return removed
def trim_expired_caches(self):
    """Remove expired closed-event entries and stale per-client caches,
    returning the list of trimmed cache IDs.

    Requires cache_lock to be held.  Relies on Python 2 .items()
    returning a list, so deletion during iteration is safe.
    """
    if not self.cache_lock.locked():
        raise Exception('Must be locked to trim caches.')
    removed = []
    now = uptime.secs()
    # caches[None] is the master item map shared by all clients.
    allitems = self.caches[None]
    for guid, closed in self.close_events.items():
        if (now - closed) > self.closed_event_ttl:
            if guid in allitems:
                del (allitems[guid])
            del (self.close_events[guid])
    for cid, cache in self.caches.items():
        # Skip the master map (cid None); trim untouched client caches.
        if cid and (cache.since_touched() > self.cache_ttl):
            del (self.caches[cid])
            removed.append(cid)
    if self.debug and removed:
        print 'Cache trim trimmed the following IDs: %s.' % (removed, )
    return removed
def update_cache(self, value):
    """Normalize the incoming value, fire COV on change, and refresh the
    cached Result (stamped with the previous receipt time)."""
    received = uptime.secs()
    # Unwrap bundled values down to this node's property.
    if isinstance(value, ValueObj):
        value = value.get(self.prop_name)
    # Missing values and errors are represented as ETimeout.
    if value is None or isinstance(value, Exception):
        value = ETimeout()
    if value != self._cached_value:
        if self.event_has_subscribers():
            self._trigger_cov(self._cached_value, value, time.time())
        self._cached_value = value
    if self._cached_result is None:
        changes = 0
    else:
        changes = self._cached_result.changes + 1
    self._cached_result = Result(self._cached_value, self._last_rcvd, changes)
    self._last_rcvd = received
    return
def update_cache(self, value):
    """Coerce the raw update into a concrete value (errors and None become
    ETimeout), publish COV on change, and roll the cached Result forward."""
    arrival = uptime.secs()
    if isinstance(value, ValueObj):
        # Pull this node's property out of the bundled object.
        value = value.get(self.prop_name)
    if value is None or isinstance(value, Exception):
        value = ETimeout()
    changed = value != self._cached_value
    if changed:
        if self.event_has_subscribers():
            self._trigger_cov(self._cached_value, value, time.time())
        self._cached_value = value
    previous = self._cached_result
    if previous is None:
        changes = 0
    else:
        changes = previous.changes + 1
    self._cached_result = Result(
        self._cached_value, self._last_rcvd, changes)
    self._last_rcvd = arrival
    return
def subscribe(self, name, func):
    """Register `func` to receive values for `name`; immediately replay a
    still-fresh cached value, and start continuous updates on the first
    subscription.
    """
    self._subscription_lock.acquire()
    try:
        ##
        # if there are multiple external consumers, they are subscribed
        # via event producing child node.
        self._subscribers[name] = func
        self._subscribed += 1
        # Best-effort replay of the last value while it is within TTL.
        if self._last_value and (uptime.secs() - self._last_rcvd) < self.ttl:
            try:
                value = self._last_value.get(name)
                func(value)
            except:
                # Replay is advisory only; never fail the subscribe.
                pass
        if self._subscribed == 1:
            # First subscriber kicks off the continuous update loop.
            self.update_continuous(None)
    finally:
        self._subscription_lock.release()
    return
def test_run_many(self):
    """Schedule 1000 one-shot entries at random offsets and verify every
    callback fires and the scheduler drains down to one entry."""
    cursystime = _time.time()
    curuptime = uptime.secs()
    # Create 1000 entries
    count = 1000
    # Earliest entry executes in 20 seconds.
    # NOTE(review): the comment above disagrees with offset=5 below.
    offset = 5
    # Schedule for random value between offset and offset + range
    variation = 5
    # Generate offset list
    offsets = [offset + (variation * random()) for i in range(count)]
    sched = mpx.lib.scheduler.Scheduler()
    sched.setdebug(debug)
    sched.start()
    # NOTE(review): in Python 2 the comprehension variable leaks, so
    # after this line `offset` is the *last* element of offsets, which
    # changes the pause() duration below -- confirm intended.
    entries = [sched.after(offset, self.callback2) for offset in offsets]
    pause(offset + variation)
    callbacks = self.callback_count
    assert callbacks == count, 'Counted %d, not %d' % (callbacks, count)
    assert len(sched._entries) == 1, 'More than one entry left'
    sched.stop()
def handle_alarm_update(self, event):
    """Queue an alarm event for processing; when the cache lock is free,
    trim expired caches and drain the event queue immediately.

    StateEvents are unwrapped to their source alarm; 'closed' alarms are
    timestamped for later TTL-based cleanup.
    """
    if self.debug:
        tstart = time.time()
    if isinstance(event, StateEvent):
        event = event.source
    if event.is_state('closed'):
        # Remember when the alarm closed so trim_expired_caches can
        # expire it after closed_event_ttl.
        self.close_events[event.GUID] = uptime.secs()
    self.event_queue.enqueue(event)
    # Non-blocking acquire: if another thread is processing, just leave
    # the event queued for it.
    if self.cache_lock.acquire(0):
        try:
            self.trim_expired_caches()
            self.process_events(self.event_queue.popqueue())
        finally:
            self.cache_lock.release()
    else:
        print 'Alarm update not processing queue; locked.'
    if self.debug:
        tend = time.time()
        tlapse = tend - tstart
        print 'Took RSS2 Syndic %s secs to handle alarm event.' % tlapse
    return