Example #1
    def increaseRequestCount(self, compValue, cumulusComputationDefinition):
        """Register 'compValue' as interested in the computation: bump the
        computation's refcount, assign a priority on the first request, and
        replay any already-finished result through the update queue."""
        computationId = self.cumulusGateway.getComputationIdForDefinition(
                        cumulusComputationDefinition
                        )

        with self.lock_:
            if computationId not in self.computedValuesForComputations:
                self.computedValuesForComputations[computationId] = set()
            self.computedValuesForComputations[computationId].add(compValue)

            self.refcountsForCompIds_[computationId] += 1
            if self.refcountsForCompIds_[computationId] == 1:
                self.cumulusGateway.setComputationPriority(
                    computationId,
                    CumulusNative.ComputationPriority(self.allocNewPriority_())
                    )

            if computationId in self.finishedResultsForComputations:
                result, statistics = self.finishedResultsForComputations[computationId]
                BackgroundUpdateQueue.push(
                    self.valueUpdater(
                        compValue,
                        result,
                        statistics
                        )
                    )
Example #2
    def resetStateEntirely(self):
        """Cancel all computations and clear the compute cache."""
        self.cancelAllComputations(True)

        with self.lock_:
            computations = list(self.refcountsForCompIds_.keys())

            for computationId in computations:
                if computationId in self.finishedResultsForComputations:
                    del self.finishedResultsForComputations[computationId]
                for compVal in self.computedValuesForComputations[computationId]:
                    BackgroundUpdateQueue.push(
                        self.valueUpdater(
                            compVal,
                            None,
                            None
                            )
                        )

            for vecId in self.vectorDataIDToVectorSlices_:
                for cgLocation in self.vectorDataIDToVectorSlices_[vecId]:
                    BackgroundUpdateQueue.push(self.createSetIsLoadedFun(cgLocation, False))
            
            self.vectorDataIDRequestCount_ = {}
            self.vectorDataIDToVectorSlices_ = {}
Example #3
def waitForResults(graph, values, timeout=60.0):
    """wait for 'x' to have 'result' not be zero. return the maximum number
    of CPUs ever observed to be allocated to 'x'"""
    t0 = time.time()

    def notLoadedCount():
        notLoadedCount = 0

        for x in values:
            if x.valueIVC is None:
                notLoadedCount += 1

        return notLoadedCount

    with IncreasedRequestCount(values):
        maxCPUs = None
        while notLoadedCount() > 0 and time.time() - t0 < timeout:
            time.sleep(.01)
            BackgroundUpdateQueue.pullOne(timeout=.10)
            graph.flush()

            for x in values:
                if x.totalWorkerCount is not None:
                    if maxCPUs is None:
                        maxCPUs = x.totalWorkerCount
                    else:
                        maxCPUs = max(maxCPUs, x.totalWorkerCount)

        assert notLoadedCount() == 0, "Timed out: %s of %s didn't finish:\n\t%s\n" % (
            notLoadedCount(),
            len(values),
            "\n\t".join([str(x) for x in values if x.valueIVC is None])
            )

        return maxCPUs
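
Note: these helpers share one producer/consumer pattern: Cumulus callbacks run on background threads and only enqueue zero-argument callables with BackgroundUpdateQueue.push, while a foreground (test or UI) thread later drains the queue with pullOne/pullAll and then flushes the computed graph. Below is a minimal, self-contained sketch of that pattern; _UpdateQueueSketch is a hypothetical stand-in, not the real BackgroundUpdateQueue module, whose implementation may differ.

import threading
import collections

class _UpdateQueueSketch(object):
    # Hypothetical stand-in for BackgroundUpdateQueue: a thread-safe queue of
    # zero-argument callables. Details of the real module may differ.
    def __init__(self):
        self.cond_ = threading.Condition()
        self.items_ = collections.deque()

    def push(self, fn):
        # called from background threads (gateway callbacks)
        with self.cond_:
            self.items_.append(fn)
            self.cond_.notify()

    def pullOne(self, timeout=None):
        # dequeue a single callable (waiting up to 'timeout' seconds) and run
        # it on the calling thread
        with self.cond_:
            if not self.items_:
                self.cond_.wait(timeout)
            fn = self.items_.popleft() if self.items_ else None
        if fn is not None:
            fn()

    def pullAll(self):
        # drain and run everything currently queued
        while True:
            with self.cond_:
                if not self.items_:
                    return
                fn = self.items_.popleft()
            fn()

queue = _UpdateQueueSketch()
results = []

producer = threading.Thread(target=lambda: queue.push(lambda: results.append(42)))
producer.start()
producer.join()              # the update is queued, but has not run yet

queue.pullOne(timeout=1.0)   # the foreground thread applies it
assert results == [42]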
Example #4
    def onComputationResult(self, computationId, result, statistics):
        """Record a finished computation and schedule updates for the computed
        values currently watching it."""
        with self.lock_:
            self.finishedResultsForComputations[computationId] = (result, statistics)
            if computationId in self.computedValuesForComputations:
                for compVal in self.computedValuesForComputations[computationId]:
                    BackgroundUpdateQueue.push(
                        self.valueUpdater(compVal, result, statistics)
                        )
Example #5
    def onCPUCountChanged(self, computationSystemwideCpuAssignment):
        """Propagate a system-wide CPU assignment to the computed values
        watching the affected computation."""
        with self.lock_:
            computationId = computationSystemwideCpuAssignment.computation

            if computationId in self.computedValuesForComputations:
                for compVal in self.computedValuesForComputations[computationId]:
                    BackgroundUpdateQueue.push(
                        self.cpuCountSetter_(compVal, computationSystemwideCpuAssignment)
                        )
Example #6
    def onNewGlobalUserFacingLogMessage(self, newMsg):
        def updater():
            global ViewOfEntireCumulusSystem
            global PersistentCacheIndex
            if ViewOfEntireCumulusSystem is None:
                import ufora.BackendGateway.ComputedValue.ViewOfEntireCumulusSystem as ViewOfEntireCumulusSystem
                import ufora.BackendGateway.ComputedValue.PersistentCacheIndex as PersistentCacheIndex
                
            ViewOfEntireCumulusSystem.ViewOfEntireCumulusSystem().pushNewGlobalUserFacingLogMessage(newMsg)

        BackgroundUpdateQueue.push(updater)
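
The updater above defers its ufora imports until first use, binding them to module-level names through the global declarations, presumably to avoid a circular import at module load time. A minimal, self-contained sketch of the same lazy-import pattern, using the standard json module as a stand-in for the deferred import:

JsonModule = None

def firstUse(value):
    # import on first call only; 'global' makes the import bind the
    # module-level name, so later calls skip the import entirely
    global JsonModule
    if JsonModule is None:
        import json as JsonModule
    return JsonModule.dumps(value)

assert firstUse({"ok": True}) == '{"ok": true}'
assert firstUse({"ok": True}) == '{"ok": true}'   # second call reuses the cached module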
Example #7
    def onCacheLoad(self, vectorDataID):
        with self.lock_:
            if vectorDataID in self.vectorDataIDToVectorSlices_:
                cgLocations = self.vectorDataIDToVectorSlices_[vectorDataID]
            else:
                cgLocations = set()

            self.collectOffloadedVectors_()

            for cgLocation in cgLocations:
                BackgroundUpdateQueue.push(self.createSetIsLoadedFun(cgLocation, True))
Example #8
    def onCacheLoad(self, vectorDataID):
        with self.lock_:
            if vectorDataID in self.vectorDataIDToVectorSlices_:
                cgLocations = self.vectorDataIDToVectorSlices_[vectorDataID]
            else:
                cgLocations = set()

            self.collectOffloadedVectors_()

            for cgLocation in cgLocations:
                BackgroundUpdateQueue.push(
                    self.createSetIsLoadedFun(
                        cgLocation,
                        self.computeVectorSliceIsLoaded_(cgLocation)
                        )
                    )
Example #9
    def onJsonViewOfSystemChanged(self, json):
        def updater():
            global ViewOfEntireCumulusSystem
            global PersistentCacheIndex
            if ViewOfEntireCumulusSystem is None:
                import ufora.BackendGateway.ComputedValue.ViewOfEntireCumulusSystem as ViewOfEntireCumulusSystem
                import ufora.BackendGateway.ComputedValue.PersistentCacheIndex as PersistentCacheIndex

            ViewOfEntireCumulusSystem.ViewOfEntireCumulusSystem().viewOfSystem_ = json.toSimple()
            PersistentCacheIndex.PersistentCacheIndex().update()

        BackgroundUpdateQueue.push(updater)
Example #10
    def waitUntilTrue(self, predicate, timeout=10.0):
        t0 = time.time()
        interval = 0.001
        while not predicate():
            time.sleep(interval)

            BackgroundUpdateQueue.pullOne(timeout=interval)
            self.graph.flush()

            if time.time() - t0 > timeout:
                self.assertFalse(True, "timed out")
                return

            interval = min(interval * 2, 0.1)
Example #11
    def onExternalIoTaskCompleted(self, msg):
        taskGuid = msg.taskId.guid
        with self.lock_:
            if taskGuid not in self.externalIoTaskCallbacks_:
                logging.warn("TaskId %s was not found in the task guid list", taskGuid)
                return

            callback = self.externalIoTaskCallbacks_[taskGuid]
            del self.externalIoTaskCallbacks_[taskGuid]

            def executor():
                callback(msg.result)

            BackgroundUpdateQueue.push(executor)
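
The handler above pairs a per-task callback registry with the update queue: the callback is looked up and removed under the lock, and its invocation is deferred to the foreground thread by pushing an executor. A self-contained sketch of that handoff, with hypothetical names rather than the real ufora types (a plain list stands in for BackgroundUpdateQueue):

import threading

class _IoTaskRegistrySketch(object):
    # Hypothetical illustration of the guid -> callback handoff; not the real
    # ufora classes.
    def __init__(self, pushFun):
        self.lock_ = threading.Lock()
        self.callbacks_ = {}
        self.push_ = pushFun                      # e.g. BackgroundUpdateQueue.push

    def registerCallback(self, taskGuid, callback):
        with self.lock_:
            self.callbacks_[taskGuid] = callback

    def onTaskCompleted(self, taskGuid, result):
        with self.lock_:
            callback = self.callbacks_.pop(taskGuid, None)
            if callback is None:
                return                            # unknown task: nothing to do
            # defer the callback to the foreground thread via the queue
            self.push_(lambda: callback(result))

pending = []                                      # stand-in for the update queue
registry = _IoTaskRegistrySketch(pending.append)

seen = []
registry.registerCallback("task-1", seen.append)
registry.onTaskCompleted("task-1", "ok")

pending.pop(0)()                                  # foreground thread drains the queue
assert seen == ["ok"]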
Example #12
    def collectOffloadedVectors_(self):
        offloaded = self.ramCacheOffloadRecorder.extractDropped()

        if offloaded:
            logging.info("ComputedValue RamCache dropped %s", offloaded)

        for offloadedVecDataID in offloaded:
            if offloadedVecDataID in self.vectorDataIDToVectorSlices_:
                for cgLocation in self.vectorDataIDToVectorSlices_[offloadedVecDataID]:
                    BackgroundUpdateQueue.push(self.createSetIsLoadedFun(cgLocation, False))

        if offloaded:
            #check if there's anything we need to load
            self.sendReloadRequests()
Example #13
    def updateComputedGraph_(self):
        self.sharedStateSynchronizer.update()

        BackgroundUpdateQueue.moveNextFrameToCurFrame()
        BackgroundUpdateQueue.pullAll()

        self.computedGraph.flushOrphans()
        self.computedGraph.flush()

        self.controlRoot.pruneDirtyChildren()
        self.controlRoot.update()

        #self.controlRoot.display()

        self.sharedStateSynchronizer.commitPendingWrites()
Example #14
def waitCacheItemsAreLoaded(graph, cacheItems, timeout=60.0):
    with IncreasedRequestCount(cacheItems):
        def allAreLoaded():
            for c in cacheItems:
                if not c.isLoaded:
                    return False

            return True

        t0 = time.time()

        while not allAreLoaded() and time.time() - t0 < timeout:
            BackgroundUpdateQueue.pullOne(timeout=1.0)
            graph.flush()

        assert allAreLoaded(), "Timed out"
0
def waitForResults(graph, values, timeout = 60.0):
    """wait for 'x' to have 'result' not be zero. return the maximum number
    of CPUs ever observed to be allocated to 'x'"""
    t0 = time.time()

    def notLoadedCount():
        notLoadedCount = 0

        for x in values:
            if x.valueIVC is None:
                notLoadedCount += 1

        return notLoadedCount

    with IncreasedRequestCount(values):
        maxCPUs = None
        while notLoadedCount() > 0 and time.time() - t0 < timeout:
            time.sleep(.01)
            BackgroundUpdateQueue.pullOne(timeout = .10)
            graph.flush()

            for x in values:
                if x.totalWorkerCount is not None:
                    if maxCPUs is None:
                        maxCPUs = x.totalWorkerCount
                    else:
                        maxCPUs = max(maxCPUs,x.totalWorkerCount)

        assert notLoadedCount() == 0, "Timed out: %s of %s didn't finish:\n\t%s\n" % (
            notLoadedCount(),
            len(values),
            "\n\t".join(
                [str(x) for x in values if x.valueIVC is None]
                )
            )

        return maxCPUs
Example #15
    def refreshGraph(self):
        BackgroundUpdateQueue.pullAll()
        self.graph.flush()
Example #16
def waitCacheItemIsLoaded(graph, cacheItem):
    with IncreasedRequestCount(cacheItem):
        while not cacheItem.isLoaded:
            BackgroundUpdateQueue.pullOne()
            graph.flush()
Example #17
def refreshGraph(graph):
    BackgroundUpdateQueue.pullAll()
    graph.flush()