Example 1
def createManagerAndView(cacheDir=None, useMemoChannel=False):
    viewChannel, managerChannel = SharedStateNative.InMemoryChannelWithoutMemo(callbackScheduler)

    storage = None

    if cacheDir:
        storage = SharedStateNative.Storage.FileStorage(
            cacheDir,
            10,             # maxOpenFiles
            0.1,            # maxLogFileSizeMb
            useMemoChannel
            )

    manager = SharedStateNative.KeyspaceManager(
            0,              # randomSeed
            1,              # numManagers
            0x7fffffff,     # backupInterval
            0x7fffffff,     # pingInterval
            '',
            storage)

    manager.add(managerChannel)
    view = DummyView(viewChannel)
    view.initialize()
    return view, manager
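A minimal usage sketch of the helper above, assuming callbackScheduler is already bound in the enclosing module (the helper reads it as a global); the cache path is hypothetical:

# Purely in-memory: with no cacheDir, storage stays None.
view, manager = createManagerAndView()

# File-backed: events are persisted through FileStorage under cacheDir.
view, manager = createManagerAndView(cacheDir='/tmp/sharedStateCache')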
Example 2
    def subscribe():
        try:
            self.asyncView.subscribeToKeyspace(
                SharedStateNative.getClientInfoKeyspace(), 1, self.onNewClientInfo)
            doneDeferred = self.asyncView.subscribeToKeyspace(
                self.workerStatusKeyspace, 0, self.onNewWorkerStatus)
            doneDeferred.addCallbacks(subscribeCallback, lambda exception: None)
        except UserWarning:
            logging.warning(
                "Failed to subscribe to asyncView keyspace: disconnected from "
                "SharedState while reconnecting")
Example 3
def KeyspaceManager(randomSeed,
                    numManagers,
                    backupInterval=60 * 10,
                    pingInterval=20,
                    cachePathOverride=None,
                    maxOpenFiles=None,
                    maxLogFileSizeMb=10):
    if cachePathOverride is None:
        cachePathOverride = Setup.config().sharedStateCache

    if maxOpenFiles is None:
        import resource
        # Integer division keeps maxOpenFiles an int.
        maxOpenFiles = min(
            resource.getrlimit(resource.RLIMIT_NOFILE)[0] // 2, 1000)

    if cachePathOverride != "":
        logging.info(
            "Creating FileStorage(cachePathOverride=%s, maxOpenFiles=%s, maxLogFileSizeMb=%s)",
            cachePathOverride, maxOpenFiles, maxLogFileSizeMb)
        storage = SharedStateNative.Storage.FileStorage(
            cachePathOverride, maxOpenFiles, maxLogFileSizeMb)
    else:
        storage = None

    return SharedStateNative.KeyspaceManager(randomSeed, numManagers,
                                             backupInterval, pingInterval,
                                             storage)
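A hedged usage sketch of the factory above; per the branches in its body, an empty cachePathOverride disables FileStorage entirely, while None falls back to the configured sharedStateCache path:

# In-memory manager: '' skips the FileStorage branch.
inMemoryManager = KeyspaceManager(randomSeed=0, numManagers=1, cachePathOverride='')

# Persistent manager using the configured default cache path, with the
# default backupInterval (10 minutes) and pingInterval (20 seconds).
persistentManager = KeyspaceManager(randomSeed=0, numManagers=1)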
Example 4
    def initialize(self):
        # The first message off the channel is read and discarded.
        self.getMessage()
        self.viewChannel.write(SharedStateNative.MessageRequestSession())

        message = self.getMessage().asInitialize
        self.clientId = message.clientId
        self.masterId = message.masterId
        self.generator = message.generator
Example 5
    def createChannel(self, ipEndpoint=None):
        assert ipEndpoint is None
        viewChannel, managerChannel = SharedStateNative.InMemoryChannel(
            self.callbackScheduler)
        with self.lock:
            self.channelManager.add(managerChannel)
            self.channels.append(managerChannel)

        return viewChannel
Example 6
    def test_simple_manager(self):
        view = createViewWithNoChannel()
        before = TCMalloc.getBytesUsed()
        keyspace = SharedState.Keyspace("TakeHighestIdKeyType", json('test'), 1)
        cache = SharedStateNative.KeyspaceManager(0, 1, 0x7fffffff, 0x7fffffff, None)
        for event in producePartialEvents(view, [keyspace], 'test', 1024 * 32, 1, 8):
            cache.addEvent(event)
        view = None

        gc.collect()
        bytesUsed = TCMalloc.getBytesUsed() - before
        self.assertLess(bytesUsed, 1024 * 128)
Example 7
    def __init__(self, callbackScheduler, filterFun):
        self.filterFun = filterFun
        self.callbackScheduler = callbackScheduler
        self.viewFacingViewChannel, self.filterFacingManagerChannel = SharedStateNative.InMemoryChannel(
            self.callbackScheduler)
        self.filterFacingViewChannel, self.managerFacingManagerChannel = SharedStateNative.InMemoryChannel(
            self.callbackScheduler)
        self.stopFlag = threading.Event()

        # One pump thread per direction: manager-bound and view-bound
        # traffic each pass through filteredChannelPump.
        self.channelPumpThreads = []
        self.channelPumpThreads.append(
            ManagedThread.ManagedThread(target=self.filteredChannelPump,
                                        args=(self.filterFacingManagerChannel,
                                              self.filterFacingViewChannel)))
        self.channelPumpThreads.append(
            ManagedThread.ManagedThread(
                target=self.filteredChannelPump,
                args=(self.filterFacingViewChannel,
                      self.filterFacingManagerChannel)))
        for thread in self.channelPumpThreads:
            thread.start()
Example 8
    def test_keyspace_cache(self):
        numKeys = 1024 * 256
        before = TCMalloc.getBytesUsed()
        view = createViewWithNoChannel()
        keyspace = SharedState.Keyspace("ComparisonKeyType", json('test'), 1)
        keyrange = SharedState.KeyRange(keyspace, 0, None, None, True, True)
        cache = SharedStateNative.KeyspaceCache(keyrange, None)
        for event in producePartialEvents(view, [keyspace], 'test', numKeys, 1, 8):
            cache.addEvent(event)
        cache.newMinimumId(numKeys)
        view = None

        gc.collect()
        bytesUsed = TCMalloc.getBytesUsed() - before
        self.assertLess(bytesUsed, 1024 * 16)
Example 9
def connectedClientInfo(view):
    """given a view subscribed to the client info keyspace, computes all the connected
    clients"""

    clientInfoKeyspace = SharedStateNative.getClientInfoKeyspace()
    k = view.nextKey(
        Key(clientInfoKeyspace,
            (NativeJson.lowestValue(), NativeJson.lowestValue())))

    maxId = 0
    tr = set()
    while k is not None:
        if k.keyspace != clientInfoKeyspace.name:
            return tr, maxId
        if view[k].value() != NativeJson.Json('disconnected'):
            tr.add(k[1])
        maxId = max(maxId, view[k].id())
        k = view.nextKey(k)
    return tr, maxId
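A hedged sketch of the return contract; 'view' is assumed to be subscribed to the client info keyspace, and the toSimple() call follows the usage in Example 16:

connectedIds, maxEventId = connectedClientInfo(view)
# connectedIds is the set of client-id Json values not marked 'disconnected';
# maxEventId is the highest event id observed in the keyspace.
connectedAsSimple = set(i.toSimple() for i in connectedIds)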
Example 10
    def test_null_value_is_deleted(self):
        numEvents = 1024 * 64
        valueSize = 128
        numKeys = numEvents

        view = createViewWithNoChannel()
        keyspace = SharedState.Keyspace("ComparisonKeyType", json('test'), 1)
        keyrange = SharedState.KeyRange(keyspace, 0, None, None, True, True)
        cache = SharedStateNative.KeyspaceCache(keyrange, None)
        gc.collect()
        m0 = TCMalloc.getBytesUsed()
        for event in producePartialEvents(view, [keyspace], 'test', numKeys, numKeys, valueSize):
            cache.addEvent(event)
            cache.addEvent(produceNullifyingEvent(view, event))

        cache.newMinimumId(numKeys * 2)
        gc.collect()
        self.assertLess(TCMalloc.getBytesUsed() - m0, 1024 * 4)
Example 11
def connectedClientInfo(view):
    """given a view subscribed to the client info keyspace, computes all the connected
    clients"""

    clientInfoKeyspace = SharedStateNative.getClientInfoKeyspace()
    k = view.nextKey(
        Key(clientInfoKeyspace,
            (NativeJson.lowestValue(), NativeJson.lowestValue())))

    maxId = 0
    tr = set()
    while k is not None:
        if k.keyspace != clientInfoKeyspace.name:
            return tr, maxId
        if view[k].value() != NativeJson.Json('disconnected'):
            tr.add(k[1])
        maxId = max(maxId, view[k].id())
        k = view.nextKey(k)
    return tr, maxId
Example 12
    def test_channel_wrappers(self):
        viewChannel, managerChannel = SharedStateNative.InMemoryChannel(
            callbackScheduler)
        message = SharedState.MessageOut.MinimumIdResponse(0)
        viewChannel.write(message)
        managerChannel.get()
Example 13
def createServerSocketChannel(callbackScheduler, sock):
    allSockets_.append(sock)
    return SharedStateNative.ServerSocketChannel(callbackScheduler,
                                                 sock.fileno())
Example 14
def createClientSocketMessageChannel(callbackScheduler, sock):
    allSockets_.append(sock)
    return SharedStateNative.ClientSocketMessageChannel(
        callbackScheduler, sock.fileno())
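A hedged sketch of the client-side factory in use; the socket setup and port are assumptions, only the factory call itself comes from the example above:

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', 30002))  # hypothetical SharedState endpoint
channel = createClientSocketMessageChannel(callbackScheduler, sock)
# The factory also retains 'sock' in allSockets_, keeping the file
# descriptor alive for the native channel.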
Example 15
def Listener(view):
    """Create a new PySharedStateListener subscribed to 'view' and return it."""
    listener = SharedStateNative.Listener()
    listener.listenToView(view)
    return listener
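A minimal wiring sketch; beyond the constructor call shown above, the listener API is not covered by these examples:

listener = Listener(view)
# 'listener' now observes updates flowing through 'view', per the
# docstring above.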
Example 16
    def updateActiveMachines(self):
        if self.shouldStop():
            return

        if self._lastDisconnectTime is not None:
            if self._lastReconnectTime is None:
                return

            if time.time() - self._lastReconnectTime < TIME_TO_SLEEP_AFTER_RECONNECT:
                #we need to defer until later
                return

        logging.debug('%s attempting to get introspection information', self.clientID)
        try:
            introspectionInfo = self.asyncView.keyspaceItems(SharedStateNative.getClientInfoKeyspace())
        except UserWarning:
            logging.info('AsyncView received exception from View:\n%s', traceback.format_exc())
            self.setDisconnected()
            raise

        currentlyConnectedClientIds = set(
            key[1].toSimple() for key, value in introspectionInfo
            if value.toSimple() != 'disconnected')

        currentlyConnectedWorkers = set()
        for clientId in currentlyConnectedClientIds:
            machineId = self.clientIdToMachineIdAsString(clientId)
            if machineId:
                currentlyConnectedWorkers.add(machineId)

        logging.debug('%s currently connected list is %s',
                      self.ownMachineIdAsString(), currentlyConnectedWorkers)

        with self._lock:
            for deadWorker in self.activeMachineIds - currentlyConnectedWorkers:
                if deadWorker != self.ownMachineIdAsString():
                    logging.info("Worker dropped: %s which is not our ID (%s)", deadWorker, self.ownMachineIdAsString())
                    self.activeMachineIds.remove(deadWorker)

                    if deadWorker not in self.machineIdToClientId:
                        logging.critical("Worker %s is dead, but I don't have a client id for it.", deadWorker)
                        assert False
                    else:
                        deadClientID = self.machineIdToClientId[deadWorker]
                        self.onWorkerDrop(deadClientID, *self.clientIdToIpPortAndMachineIdAsString[deadClientID])

        newlyAliveClientIds = []
        with self._lock:
            for newlyAliveClientId in set(self.clientIdToIpPortAndMachineIdAsString.keys()).intersection(currentlyConnectedClientIds):
                nowAliveMachineId = self.clientIdToMachineIdAsString(newlyAliveClientId)

                if nowAliveMachineId is not None and nowAliveMachineId not in self.activeMachineIds:
                    logging.info(
                            "Worker clientId=%s added with IP %s and ports %s, machineIdAsString=%s",
                            newlyAliveClientId,
                            *self.clientIdToIpPortAndMachineIdAsString[newlyAliveClientId]
                            )
                    self.activeMachineIds.add(nowAliveMachineId)

                    # Defer onWorkerAdd notifications.
                    newlyAliveClientIds.append(newlyAliveClientId)
                    logging.debug("Active workers: %s", self.activeMachineIds)

        for newlyAliveClientId in newlyAliveClientIds:
            self.onWorkerAdd(newlyAliveClientId, *self.clientIdToIpPortAndMachineIdAsString[newlyAliveClientId])
Example 17
    def createView_(self, friendlyName):
        view = SharedStateNative.createView(self.enableDebugPrint)
        channel = self.channelFactory.createChannel()
        view.add(channel)

        return view
Example 18
    def createView_(self, friendlyName):
        view = SharedStateNative.createView(self.enableDebugPrint)
        channel = self.channelFactory.createChannel()
        view.add(channel)

        return view
Example 19
    def updateActiveMachines(self):
        if self.shouldStop():
            return

        if self._lastDisconnectTime is not None:
            if self._lastReconnectTime is None:
                return

            if time.time() - self._lastReconnectTime < TIME_TO_SLEEP_AFTER_RECONNECT:
                #we need to defer until later
                return

        logging.debug('%s attempting to get introspection information',
                      self.clientID)
        try:
            introspectionInfo = self.asyncView.keyspaceItems(
                SharedStateNative.getClientInfoKeyspace())
        except UserWarning:
            logging.info('AsyncView received exception from View:\n%s',
                         traceback.format_exc())
            self.setDisconnected()
            raise

        currentlyConnectedClientIds = set(
            key[1].toSimple() for key, value in introspectionInfo
            if value.toSimple() != 'disconnected')

        currentlyConnectedWorkers = set()
        for clientId in currentlyConnectedClientIds:
            machineId = self.clientIdToMachineIdAsString(clientId)
            if machineId:
                currentlyConnectedWorkers.add(machineId)

        logging.debug('%s currently connected list is %s',
                      self.ownMachineIdAsString(), currentlyConnectedWorkers)

        with self._lock:
            for deadWorker in self.activeMachineIds - currentlyConnectedWorkers:
                if deadWorker != self.ownMachineIdAsString():
                    logging.info("Worker dropped: %s which is not our ID (%s)",
                                 deadWorker, self.ownMachineIdAsString())
                    self.activeMachineIds.remove(deadWorker)

                    if deadWorker not in self.machineIdToClientId:
                        logging.critical(
                            "Worker %s is dead, but I don't have a client id for it.",
                            deadWorker)
                        assert False
                    else:
                        deadClientID = self.machineIdToClientId[deadWorker]
                        self.onWorkerDrop(
                            deadClientID,
                            *self.clientIdToIpPortAndMachineIdAsString[deadClientID])

        newlyAliveClientIds = []
        with self._lock:
            for newlyAliveClientId in set(
                    self.clientIdToIpPortAndMachineIdAsString.keys()
                    ).intersection(currentlyConnectedClientIds):
                nowAliveMachineId = self.clientIdToMachineIdAsString(
                    newlyAliveClientId)

                if nowAliveMachineId is not None and nowAliveMachineId not in self.activeMachineIds:
                    logging.debug(
                        "Worker clientId=%s added with IP %s and ports %s, machineIdAsString=%s",
                        newlyAliveClientId,
                        *self.clientIdToIpPortAndMachineIdAsString[
                            newlyAliveClientId])
                    self.activeMachineIds.add(nowAliveMachineId)

                    # Defer onWorkerAdd notifications.
                    newlyAliveClientIds.append(newlyAliveClientId)
                    logging.debug("Active workers: %s", self.activeMachineIds)

        for newlyAliveClientId in newlyAliveClientIds:
            self.onWorkerAdd(
                newlyAliveClientId,
                *self.clientIdToIpPortAndMachineIdAsString[newlyAliveClientId])