Example 1
    def increaseRequestCount(self, compValue, cumulusComputationDefinition):
        computationId = self.cumulusGateway.getComputationIdForDefinition(
                        cumulusComputationDefinition
                        )

        with self.lock_:
            if computationId not in self.computedValuesForComputations:
                self.computedValuesForComputations[computationId] = set()
            self.computedValuesForComputations[computationId].add(compValue)

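            # bump the refcount; the first reference allocates a fresh scheduling priority for this computation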
            self.refcountsForCompIds_[computationId] += 1
            if self.refcountsForCompIds_[computationId] == 1:
                self.cumulusGateway.setComputationPriority(
                    computationId,
                    CumulusNative.ComputationPriority(self.allocNewPriority_())
                    )

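            # if the computation already finished, queue an immediate value update with the cached result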
            if computationId in self.finishedResultsForComputations:
                result, statistics = self.finishedResultsForComputations[computationId]
                BackgroundUpdateQueue.push(
                    self.valueUpdater(
                        compValue,
                        result,
                        statistics
                        )
                    )
Example 2
    def resetStateEntirely(self):
        """Cancel all computations and clear the compute cache."""
        self.cancelAllComputations(True)

        with self.lock_:
            computations = list(self.refcountsForCompIds_.keys())

            for computationId in computations:
                if computationId in self.finishedResultsForComputations:
                    del self.finishedResultsForComputations[computationId]
                for compVal in self.computedValuesForComputations[computationId]:
                    BackgroundUpdateQueue.push(
                        self.valueUpdater(
                            compVal,
                            None,
                            None
                            )
                        )

            for vecId in self.vectorDataIDToVectorSlices_:
                for cgLocation in self.vectorDataIDToVectorSlices_[vecId]:
                    BackgroundUpdateQueue.push(self.createSetIsLoadedFun(cgLocation, False))
            
            self.vectorDataIDRequestCount_ = {}
            self.vectorDataIDToVectorSlices_ = {}
Example 3
def waitForResults(graph, values, timeout=60.0):
    """Wait until every value in 'values' has a non-None valueIVC, or until
    'timeout' seconds elapse. Return the maximum worker count ever observed
    across 'values' while waiting."""
    t0 = time.time()

    def notLoadedCount():
        notLoadedCount = 0

        for x in values:
            if x.valueIVC is None:
                notLoadedCount += 1

        return notLoadedCount

    with IncreasedRequestCount(values):
        maxCPUs = None
        while notLoadedCount() > 0 and time.time() - t0 < timeout:
            time.sleep(.01)
            BackgroundUpdateQueue.pullOne(timeout=.10)
            graph.flush()

            for x in values:
                if x.totalWorkerCount is not None:
                    if maxCPUs is None:
                        maxCPUs = x.totalWorkerCount
                    else:
                        maxCPUs = max(maxCPUs, x.totalWorkerCount)

        assert notLoadedCount() == 0, "Timed out: %s of %s didn't finish:\n\t%s\n" % (
            notLoadedCount(),
            len(values),
            "\n\t".join([str(x) for x in values if x.valueIVC is None])
            )

        return maxCPUs
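
Note: the IncreasedRequestCount context manager used in this and later examples is not itself listed here. Below is a minimal sketch of how such a helper could look, assuming (this is an assumption, not the project's actual code) that each computed value exposes increaseRequestCount() and decreaseRequestCount() methods:

class IncreasedRequestCount(object):
    """Hold a request-count reference on one or more computed values for the
    duration of a 'with' block. Hypothetical sketch; the method names are assumptions."""
    def __init__(self, values):
        # accept either a single value or an iterable of values
        if isinstance(values, (list, tuple, set)):
            self.values = list(values)
        else:
            self.values = [values]

    def __enter__(self):
        for value in self.values:
            value.increaseRequestCount()
        return self

    def __exit__(self, excType, excValue, excTraceback):
        for value in self.values:
            value.decreaseRequestCount()

This with-block form appears in Examples 3, 14, and 16.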
Example 4
    def onComputationResult(self, computationId, result, statistics):
        with self.lock_:
            self.finishedResultsForComputations[computationId] = (result, statistics)
            if computationId in self.computedValuesForComputations:
                for compVal in self.computedValuesForComputations[computationId]:
                    BackgroundUpdateQueue.push(
                        self.valueUpdater(compVal, result, statistics))
Example 5
    def onCPUCountChanged(self, computationSystemwideCpuAssignment):
        with self.lock_:
            computationId = computationSystemwideCpuAssignment.computation

            if computationId in self.computedValuesForComputations:
                for compVal in self.computedValuesForComputations[computationId]:
                    BackgroundUpdateQueue.push(
                        self.cpuCountSetter_(compVal, computationSystemwideCpuAssignment))
Example 6
    def onNewGlobalUserFacingLogMessage(self, newMsg):
        def updater():
            global ViewOfEntireCumulusSystem
            global PersistentCacheIndex
            if ViewOfEntireCumulusSystem is None:
                import ufora.BackendGateway.ComputedValue.ViewOfEntireCumulusSystem as ViewOfEntireCumulusSystem
                import ufora.BackendGateway.ComputedValue.PersistentCacheIndex as PersistentCacheIndex
                
            ViewOfEntireCumulusSystem.ViewOfEntireCumulusSystem().pushNewGlobalUserFacingLogMessage(newMsg)

        BackgroundUpdateQueue.push(updater)
Example 7
    def onCacheLoad(self, vectorDataID):
        with self.lock_:
            if vectorDataID in self.vectorDataIDToVectorSlices_:
                cgLocations = self.vectorDataIDToVectorSlices_[vectorDataID]
            else:
                cgLocations = set()

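            # first record any vector slices the RAM cache has dropped since the last check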
            self.collectOffloadedVectors_()

            for cgLocation in cgLocations:
                BackgroundUpdateQueue.push(self.createSetIsLoadedFun(cgLocation, True))
Example 8
    def onCacheLoad(self, vectorDataID):
        with self.lock_:
            if vectorDataID in self.vectorDataIDToVectorSlices_:
                cgLocations = self.vectorDataIDToVectorSlices_[vectorDataID]
            else:
                cgLocations = set()

            self.collectOffloadedVectors_()

            for cgLocation in cgLocations:
                BackgroundUpdateQueue.push(self.createSetIsLoadedFun(cgLocation, self.computeVectorSliceIsLoaded_(cgLocation)))
Example 9
    def onJsonViewOfSystemChanged(self, json):
        def updater():
            global ViewOfEntireCumulusSystem
            global PersistentCacheIndex
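            # deferred import: these modules are only loaded the first time an updater runs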
            if ViewOfEntireCumulusSystem is None:
                import ufora.BackendGateway.ComputedValue.ViewOfEntireCumulusSystem as ViewOfEntireCumulusSystem
                import ufora.BackendGateway.ComputedValue.PersistentCacheIndex as PersistentCacheIndex

            ViewOfEntireCumulusSystem.ViewOfEntireCumulusSystem().viewOfSystem_ = json.toSimple()
            PersistentCacheIndex.PersistentCacheIndex().update()

        BackgroundUpdateQueue.push(updater)
Example 10
    def waitUntilTrue(self, predicate, timeout=10.0):
        t0 = time.time()
        interval = 0.001
        while not predicate():
            time.sleep(interval)

            BackgroundUpdateQueue.pullOne(timeout=interval)
            self.graph.flush()

            if time.time() - t0 > timeout:
                self.assertFalse(True, "timed out")
                return

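            # back off exponentially, capping the polling interval at 100ms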
            interval = min(interval * 2, 0.1)
Example 11
    def onExternalIoTaskCompleted(self, msg):
        taskGuid = msg.taskId.guid
        with self.lock_:
            if taskGuid not in self.externalIoTaskCallbacks_:
                logging.warning("TaskId %s was not found in the task guid list", taskGuid)
                return

            callback = self.externalIoTaskCallbacks_[taskGuid]
            del self.externalIoTaskCallbacks_[taskGuid]

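            # defer the callback onto the background update queue instead of calling it while holding the lock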
            def executor():
                callback(msg.result)

            BackgroundUpdateQueue.push(executor)
Example 12
    def collectOffloadedVectors_(self):
        offloaded = self.ramCacheOffloadRecorder.extractDropped()

        if offloaded:
            logging.info("ComputedValue RamCache dropped %s", offloaded)

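        # mark every slice backed by a dropped vector as no longer loaded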
        for offloadedVecDataID in offloaded:
            if offloadedVecDataID in self.vectorDataIDToVectorSlices_:
                for cgLocation in self.vectorDataIDToVectorSlices_[offloadedVecDataID]:
                    BackgroundUpdateQueue.push(self.createSetIsLoadedFun(cgLocation, False))

        if offloaded:
            #check if there's anything we need to load
            self.sendReloadRequests()
Example 13
    def updateComputedGraph_(self):
        self.sharedStateSynchronizer.update()

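        # apply all callbacks queued on the BackgroundUpdateQueue before flushing the computed graph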
        BackgroundUpdateQueue.moveNextFrameToCurFrame()
        BackgroundUpdateQueue.pullAll()

        self.computedGraph.flushOrphans()
        self.computedGraph.flush()

        self.controlRoot.pruneDirtyChildren()
        self.controlRoot.update()

        #self.controlRoot.display()

        self.sharedStateSynchronizer.commitPendingWrites()
Example 14
def waitCacheItemsAreLoaded(graph, cacheItems, timeout=60.0):
    with IncreasedRequestCount(cacheItems):
        def allAreLoaded():
            for c in cacheItems:
                if not c.isLoaded:
                    return False

            return True

        t0 = time.time()

        while not allAreLoaded() and time.time() - t0 < timeout:
            BackgroundUpdateQueue.pullOne(timeout=1.0)
            graph.flush()

        assert allAreLoaded(), "Timed out"
Example 15
    def refreshGraph(self):
        BackgroundUpdateQueue.pullAll()
        self.graph.flush()
Example 16
def waitCacheItemIsLoaded(graph, cacheItem):
    with IncreasedRequestCount(cacheItem):
        while not cacheItem.isLoaded:
            BackgroundUpdateQueue.pullOne()
            graph.flush()
Example 17
def refreshGraph(graph):
    BackgroundUpdateQueue.pullAll()
    graph.flush()