def createManagerAndView(cacheDir=None, useMemoChannel=False):
    viewChannel, managerChannel = SharedStateNative.InMemoryChannelWithoutMemo(callbackScheduler)

    storage = None
    if cacheDir:
        storage = SharedStateNative.Storage.FileStorage(cacheDir, 10, .1, useMemoChannel)

    manager = SharedStateNative.KeyspaceManager(
        0,
        1,
        0x7fffffff,
        0x7fffffff,
        '',
        storage)

    manager.add(managerChannel)

    view = DummyView(viewChannel)
    view.initialize()

    return view, manager
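
# Hypothetical usage sketch (not part of the original module): assumes the module-level
# 'callbackScheduler' and 'DummyView' used above. The returned view is already initialized
# and attached to the manager over the in-memory channel pair.
def exampleManagerAndView(tempDir=None):
    view, manager = createManagerAndView(cacheDir=tempDir)
    return view, manager
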
def subscribe():
    try:
        self.asyncView.subscribeToKeyspace(
            SharedStateNative.getClientInfoKeyspace(),
            1,
            self.onNewClientInfo)

        doneDeferred = self.asyncView.subscribeToKeyspace(
            self.workerStatusKeyspace,
            0,
            self.onNewWorkerStatus)

        doneDeferred.addCallbacks(subscribeCallback, lambda exception: None)
    except UserWarning as ex:
        logging.warn(
            "Failed to subscribe to asyncView keyspace: "
            "disconnected from shared state while reconnecting")
def KeyspaceManager(randomSeed,
                    numManagers,
                    backupInterval=60 * 10,
                    pingInterval=20,
                    cachePathOverride=None,
                    maxOpenFiles=None,
                    maxLogFileSizeMb=10):
    if cachePathOverride is None:
        cachePathOverride = Setup.config().sharedStateCache

    if maxOpenFiles is None:
        import resource
        maxOpenFiles = min(resource.getrlimit(resource.RLIMIT_NOFILE)[0] / 2, 1000)

    if cachePathOverride != "":
        logging.info(
            "Creating FileStorage(cachePathOverride=%s, maxOpenFiles=%s, maxLogFileSizeMb=%s)",
            cachePathOverride, maxOpenFiles, maxLogFileSizeMb)
        storage = SharedStateNative.Storage.FileStorage(
            cachePathOverride, maxOpenFiles, maxLogFileSizeMb)
    else:
        storage = None

    return SharedStateNative.KeyspaceManager(
        randomSeed,
        numManagers,
        backupInterval,
        pingInterval,
        storage)
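
# Hypothetical sketch showing one way this factory might be called: an empty
# cachePathOverride skips FileStorage, so the manager is created with storage=None
# (an in-memory-only manager). Argument values are illustrative only.
def exampleInMemoryKeyspaceManager():
    return KeyspaceManager(randomSeed=0, numManagers=1, cachePathOverride="")
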
def initialize(self):
    message = self.getMessage()
    self.viewChannel.write(SharedStateNative.MessageRequestSession())

    message = self.getMessage().asInitialize
    self.clientId = message.clientId
    self.masterId = message.masterId
    self.generator = message.generator
def createChannel(self, ipEndpoint=None):
    assert ipEndpoint is None
    viewChannel, managerChannel = SharedStateNative.InMemoryChannel(self.callbackScheduler)

    with self.lock:
        self.channelManager.add(managerChannel)
        self.channels.append(managerChannel)

    return viewChannel
def test_simple_manager(self):
    view = createViewWithNoChannel()
    before = TCMalloc.getBytesUsed()

    keyspace = SharedState.Keyspace("TakeHighestIdKeyType", json('test'), 1)
    cache = SharedStateNative.KeyspaceManager(0, 1, 0x7fffffff, 0x7fffffff, None)

    for event in producePartialEvents(view, [keyspace], 'test', 1024 * 32, 1, 8):
        cache.addEvent(event)

    view = None
    gc.collect()

    bytesUsed = TCMalloc.getBytesUsed() - before
    self.assertTrue(bytesUsed < 1024 * 128)
def __init__(self, callbackScheduler, filterFun):
    self.filterFun = filterFun
    self.callbackScheduler = callbackScheduler

    self.viewFacingViewChannel, self.filterFacingManagerChannel = \
        SharedStateNative.InMemoryChannel(self.callbackScheduler)
    self.filterFacingViewChannel, self.managerFacingManagerChannel = \
        SharedStateNative.InMemoryChannel(self.callbackScheduler)

    self.stopFlag = threading.Event()

    self.channelPumpThreads = []
    self.channelPumpThreads.append(
        ManagedThread.ManagedThread(
            target=self.filteredChannelPump,
            args=(self.filterFacingManagerChannel, self.filterFacingViewChannel)))
    self.channelPumpThreads.append(
        ManagedThread.ManagedThread(
            target=self.filteredChannelPump,
            args=(self.filterFacingViewChannel, self.filterFacingManagerChannel)))

    for thread in self.channelPumpThreads:
        thread.start()
def test_keyspace_cache(self):
    numKeys = 1024 * 256
    before = TCMalloc.getBytesUsed()

    view = createViewWithNoChannel()
    keyspace = SharedState.Keyspace("ComparisonKeyType", json('test'), 1)
    keyrange = SharedState.KeyRange(keyspace, 0, None, None, True, True)
    cache = SharedStateNative.KeyspaceCache(keyrange, None)

    for event in producePartialEvents(view, [keyspace], 'test', numKeys, 1, 8):
        cache.addEvent(event)

    cache.newMinimumId(numKeys)

    view = None
    gc.collect()

    bytesUsed = TCMalloc.getBytesUsed() - before
    self.assertTrue(bytesUsed < 1024 * 16)
def connectedClientInfo(view):
    """Given a view subscribed to the client info keyspace, compute all the connected clients."""
    clientInfoKeyspace = SharedStateNative.getClientInfoKeyspace()
    k = view.nextKey(
        Key(clientInfoKeyspace, (NativeJson.lowestValue(), NativeJson.lowestValue())))

    maxId = 0
    tr = set()
    while k is not None:
        if k.keyspace != clientInfoKeyspace.name:
            return tr, maxId
        if view[k].value() != NativeJson.Json('disconnected'):
            tr.add(k[1])
        maxId = max(maxId, view[k].id())
        k = view.nextKey(k)

    return tr, maxId
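
# Illustrative helper (hypothetical, not in the original module): given a view subscribed
# to the client info keyspace, report how many clients are currently connected and the
# largest event id observed while scanning.
def countConnectedClients(view):
    connectedClients, maxId = connectedClientInfo(view)
    return len(connectedClients), maxId
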
def test_null_value_is_deleted(self):
    numEvents = 1024 * 64
    valueSize = 128
    numKeys = numEvents

    view = createViewWithNoChannel()
    keyspace = SharedState.Keyspace("ComparisonKeyType", json('test'), 1)
    keyrange = SharedState.KeyRange(keyspace, 0, None, None, True, True)
    cache = SharedStateNative.KeyspaceCache(keyrange, None)

    gc.collect()
    m0 = TCMalloc.getBytesUsed()

    for event in producePartialEvents(view, [keyspace], 'test', numKeys, numKeys, valueSize):
        cache.addEvent(event)
        cache.addEvent(produceNullifyingEvent(view, event))

    cache.newMinimumId(numKeys * 2)
    gc.collect()

    self.assertLess(TCMalloc.getBytesUsed() - m0, 1024 * 4)
def test_channel_wrappers(self):
    viewChannel, managerChannel = SharedStateNative.InMemoryChannel(callbackScheduler)

    message = SharedState.MessageOut.MinimumIdResponse(0)
    viewChannel.write(message)
    managerChannel.get()
def createServerSocketChannel(callbackScheduler, sock):
    allSockets_.append(sock)
    return SharedStateNative.ServerSocketChannel(callbackScheduler, sock.fileno())
def createClientSocketMessageChannel(callbackScheduler, sock):
    allSockets_.append(sock)
    return SharedStateNative.ClientSocketMessageChannel(callbackScheduler, sock.fileno())
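
# Hypothetical usage sketch (not part of the original module): the factory only needs a
# connected socket object with a valid fileno(); host and port below are placeholders.
# The socket is appended to allSockets_, presumably so the Python object stays alive
# alongside the native channel that wraps its file descriptor.
def exampleCreateClientChannel(host, port):
    import socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    return createClientSocketMessageChannel(callbackScheduler, sock)
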
def Listener(view):
    """Create a new PySharedStateListener subscribed to 'view' and return it."""
    listener = SharedStateNative.Listener()
    listener.listenToView(view)
    return listener
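
# Usage sketch (hypothetical): attach a listener to an existing view. The API for
# consuming updates lives on the native PySharedStateListener object and is not shown here.
def exampleAttachListener(view):
    listener = Listener(view)
    return listener
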
def updateActiveMachines(self):
    if self.shouldStop():
        return

    if self._lastDisconnectTime is not None:
        if self._lastReconnectTime is None:
            return
        if time.time() - self._lastReconnectTime < TIME_TO_SLEEP_AFTER_RECONNECT:
            # we need to defer until later
            return

    logging.debug('%s attempting to get introspection information', self.clientID)

    try:
        introspectionInfo = self.asyncView.keyspaceItems(SharedStateNative.getClientInfoKeyspace())
    except UserWarning:
        logging.info('AsyncView received exception from View:\n%s', traceback.format_exc())
        self.setDisconnected()
        raise

    currentlyConnectedClientIds = set([
        key[1].toSimple() for key, value in introspectionInfo
        if value.toSimple() != 'disconnected'
        ])

    currentlyConnectedWorkers = set()
    for clientId in currentlyConnectedClientIds:
        machineId = self.clientIdToMachineIdAsString(clientId)
        if machineId:
            currentlyConnectedWorkers.add(machineId)

    logging.debug('%s currently connected list is %s',
                  self.ownMachineIdAsString(),
                  currentlyConnectedWorkers)

    with self._lock:
        for deadWorker in self.activeMachineIds - currentlyConnectedWorkers:
            if deadWorker != self.ownMachineIdAsString():
                logging.info("Worker dropped: %s which is not our ID (%s)",
                             deadWorker,
                             self.ownMachineIdAsString())
                self.activeMachineIds.remove(deadWorker)

                if deadWorker not in self.machineIdToClientId:
                    logging.critical("Worker %s is dead, but I don't have a client id for it.",
                                     deadWorker)
                    assert False
                else:
                    deadClientID = self.machineIdToClientId[deadWorker]
                    self.onWorkerDrop(deadClientID,
                                      *self.clientIdToIpPortAndMachineIdAsString[deadClientID])

    newlyAliveClientIds = []
    with self._lock:
        for newlyAliveClientId in set(
                self.clientIdToIpPortAndMachineIdAsString.keys()
                ).intersection(currentlyConnectedClientIds):
            nowAliveMachineId = self.clientIdToMachineIdAsString(newlyAliveClientId)

            if nowAliveMachineId is not None and nowAliveMachineId not in self.activeMachineIds:
                logging.info(
                    "Worker clientId=%s added with IP %s and ports %s, machineIdAsString=%s",
                    nowAliveMachineId,
                    *self.clientIdToIpPortAndMachineIdAsString[newlyAliveClientId]
                    )
                self.activeMachineIds.add(nowAliveMachineId)
                # Defer onWorkerAdd notifications until the lock is released.
                newlyAliveClientIds.append(newlyAliveClientId)

    logging.debug("Active workers: %s", self.activeMachineIds)

    for newlyAliveClientId in newlyAliveClientIds:
        self.onWorkerAdd(newlyAliveClientId,
                         *self.clientIdToIpPortAndMachineIdAsString[newlyAliveClientId])
def createView_(self, friendlyName):
    view = SharedStateNative.createView(self.enableDebugPrint)
    channel = self.channelFactory.createChannel()
    view.add(channel)
    return view