def doInit(self, mode):
    """Set up the electronic-logbook cache client.

    Creates the message handler and prepares the bookkeeping used to
    periodically refresh the execution-master lock.
    """
    BaseCacheClient.doInit(self, mode)
    # this runs in the main thread
    self._handler = Handler(self.log, self.plotformat)
    # master-lock bookkeeping: the lock must be refreshed before it
    # expires, so track the current state and the expiry deadline
    self._locktimeout = 5.
    self._lock_expires = 0.
    self._islocked = False
def doInit(self, mode):
    """Check that the update-notification prerequisites are configured.

    Emits a warning (but does not fail) when either the update URI or
    the keystore token is missing, since updates cannot be sent then.
    """
    if self.update_uri is None:
        self.log.warning('No update URI configured, updates will not be '
                         'sent')
    if not nicoskeystore.getCredential(self.tokenid):
        # lazy %-style logging arguments instead of eager formatting
        self.log.warning('No token %s found in keystore, updates will not '
                         'be sent', self.tokenid)
    BaseCacheClient.doInit(self, mode)
def doInit(self, mode):
    """Initialize the client-side value database and start the worker."""
    BaseCacheClient.doInit(self, mode)
    # deliberately bypass the device parameter machinery when storing
    # the signals object (plain attribute, not a device parameter)
    self.__dict__['signals'] = CacheSignals()
    # local mirror of the cache contents, guarded by its own lock
    self._dblock = threading.Lock()
    self._db = {}
    # the base cache client reconnects automatically; this flag lets
    # us suppress that until a connect is explicitly requested
    self._should_connect = False
    self._worker.start()
def _connect_action(self):
    """Perform the initial key exchange after (re)connecting."""
    self._keys_expired = False
    BaseCacheClient._connect_action(self)
    if self.showwatchdog:
        # request all current watchdog events, then subscribe to new ones
        for op in (OP_WILDCARD, OP_SUBSCRIBE):
            self._socket.sendall(to_utf8('@watchdog/%s\n' % op))
    # use appname to distinguish between different instances
    self.storeSysInfo(session.appname)
def _connect_action(self):
    """Replay all cached keys once after connecting, then go live."""
    # hold back direct processing of updates until the initial key
    # snapshot has been handled completely
    self._process_updates = False
    try:
        BaseCacheClient._connect_action(self)
        now = currenttime()
        for key in list(self._keydict):
            try:
                self._process_key(now, key, self._keydict[key])
            except Exception:
                self.log.warning('error handling first update for key %s',
                                 key, exc=1)
    finally:
        self._process_updates = True
    self.storeSysInfo('watchdog')
    self._queue.put('watchdog/%s\n' % OP_SUBSCRIBE)
    self._publish_config()
def doInit(self, mode):
    """Set up watchdog state: key cache, entries, notifiers, checker."""
    BaseCacheClient.doInit(self, mode)
    # lowercase-keyed cache of all interesting keys and current values
    self._keydict = LCDict()
    # make the status constants available by name inside conditions
    for constval, constname in status.statuses.items():
        self._keydict[constname.upper()] = constval
    # True except while the connect action replays the initial keys
    self._process_updates = False
    # currently loaded setups
    self._setups = set()
    # entry id -> entry object
    self._entries = {}
    # (mangled) cache key that lists the mail receivers
    self._mailreceiverkey = self.mailreceiverkey.replace('/', '_').lower()
    # cache key -> set of entries whose condition depends on that key
    self._keymap = {'session_mastersetup': set()}
    if self._mailreceiverkey:
        self._keymap[self._mailreceiverkey] = set()
    # active warnings: entry id -> string description, insertion order
    self._warnings = OrderedDict()
    # active count-loop pause reasons, same layout as self._warnings
    self._pausecount = OrderedDict()
    # instantiate all configured notifier devices
    self._notifiers = {'': []}
    self._all_notifiers = []
    for group, devnames in self.notifiers.items():
        devices = [session.getDevice(name, Notifier) for name in devnames]
        self._notifiers[group] = devices
        self._all_notifiers.extend(devices)
    # install the entries from the static watchlist
    for entry_dict in self.watch:
        self._add_entry(entry_dict, 'watchdog')
    # watch the setup file for modifications in the background
    createThread('refresh checker', self._checker)
def doShutdown(self):
    """Close the logbook handler before shutting down the cache client."""
    self._handler.close()
    BaseCacheClient.doShutdown(self)
def _disconnect(self, why=None):
    """Release the execution-master lock, if held, before disconnecting."""
    # only release the lock on an orderly shutdown while still connected
    lock_held = self._islocked and self._stoprequest and self._connected
    if lock_held:
        self._islocked = False
        self.unlock('elog')
    BaseCacheClient._disconnect(self, why)
def doInit(self, mode):
    """Initialize filters and launch the worker of each forwarder."""
    BaseCacheClient.doInit(self, mode)
    self._initFilters()
    # each attached forwarder service runs in its own worker thread
    for forwarder in self._attached_forwarders:
        forwarder._startWorker()
def doInit(self, mode):
    """Initialize the cache client, then set up the key filters."""
    BaseCacheClient.doInit(self, mode)
    self._initFilters()
def _connect_action(self):
    """After the initial key exchange, immediately push a full update."""
    BaseCacheClient._connect_action(self)
    self.sendUpdate()
def _connect_action(self):
    """Reset the local database, resync with the cache, then notify."""
    # clear the local database before filling it up
    with self._dblock:
        self._db.clear()
    BaseCacheClient._connect_action(self)
    # tell listeners (e.g. GUI components) that we are connected again
    self.signals.connected.emit()