    def cacheFunction(self):
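        # Presumably returns the offline cache backing a worker: a ~1 GB
        # in-memory cache when useInMemoryCache is set, otherwise a fresh
        # on-disk cache directory (numbered by diskCacheCount) capped at
        # 100 GB and 100000 files.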
        if self.useInMemoryCache:
            return CumulusNative.SimpleOfflineCache(self.callbackScheduler, 1000 * 1024 * 1024)
        else:
            self.diskCacheCount += 1
            return CumulusNative.DiskOfflineCache(
                self.callbackScheduler,
                os.path.join(self.diskCacheStorageDir, str(self.diskCacheCount)),
                100 * 1024 * 1024 * 1024,
                100000
                )
Example #2
    def diskThroughputTest(self, gb):
        if os.getenv("CUMULUS_DATA_DIR") is None:
            dataDir = tempfile.mkdtemp()
        else:
            dataDir = os.getenv("CUMULUS_DATA_DIR")
        dataDir = os.path.join(dataDir, str(uuid.uuid4()))

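        # The DiskOfflineCache arguments appear to be: scheduler, storage
        # directory, maximum size in bytes (100 GB here), and maximum file
        # count (100000).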
        diskCache = CumulusNative.DiskOfflineCache(callbackScheduler, dataDir,
                                                   100 * 1024 * 1024 * 1024,
                                                   100000)

        fiftyMegabytes = ForaNative.encodeStringInSerializedObject(" " * 1024 *
                                                                   1024 * 50)

        logging.info("Writing to %s", dataDir)

        try:
            t0 = time.time()
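            # Write 'gb' gigabytes as gb * 20 pages of 50 MB each, keyed by
            # the sha1 hash of the page index.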
            for ix in range(gb * 20):
                diskCache.store(
                    ForaNative.PageId(HashNative.Hash.sha1(str(ix)),
                                      50 * 1024 * 1024, 50 * 1024 * 1024),
                    fiftyMegabytes)

            PerformanceTestReporter.recordTest(
                "python.BigBox.Disk.Write%sGB" % gb,
                time.time() - t0, None)

            t0 = time.time()
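            # Read the same pages back; loadIfExists presumably returns the
            # serialized object if the page is still resident in the cache.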
            for ix in range(gb * 20):
                diskCache.loadIfExists(
                    ForaNative.PageId(HashNative.Hash.sha1(str(ix)),
                                      50 * 1024 * 1024, 50 * 1024 * 1024))

            PerformanceTestReporter.recordTest(
                "python.BigBox.Disk.Read%sGB" % gb,
                time.time() - t0, None)

        finally:
            shutil.rmtree(dataDir)
Example #3
    def __init__(self,
                 ownAddress,
                 channelListener,
                 channelFactory,
                 eventHandler,
                 callbackScheduler,
                 diagnosticsDir,
                 config,
                 viewFactory,
                 s3InterfaceFactory=None,
                 objectStore=None):
        Stoppable.Stoppable.__init__(self)

        #acquire a machineId randomly, using uuid
        self.machineId = CumulusNative.MachineId(
            Hash.Hash.sha1(str(uuid.uuid4()))
            )

        self.ownAddress = ownAddress
        self.callbackScheduler = callbackScheduler
        self.viewFactory = viewFactory
        self.s3InterfaceFactory = s3InterfaceFactory
        self.objectStore = objectStore
        self.threadsStarted_ = False
        self.connectedMachines = set()
        self.connectingMachines = set()  # machines we are in the process of connecting to
        self.droppedMachineIds = set()
        self.lock = threading.RLock()
        self.cumulusMaxRamCacheSizeOverride = config.cumulusMaxRamCacheMB * 1024*1024
        self.cumulusVectorRamCacheSizeOverride = config.cumulusVectorRamCacheMB * 1024*1024
        self.cumulusThreadCountOverride = config.cumulusServiceThreadCount
        self.cumulusTrackTcmalloc = config.cumulusTrackTcmalloc
        self.eventHandler = eventHandler

        self.reconnectPersistentCacheIndexViewThreads = []

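        # A configured subdirectory appears to mark the disk cache as private
        # to this service instance, so it is safe to delete on teardown.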
        if config.cumulusDiskCacheStorageSubdirectory is not None:
            self.cumulusDiskCacheWantsDeletionOnTeardown = True
            self.cumulusDiskCacheStorageDir = os.path.join(
                config.cumulusDiskCacheStorageDir,
                config.cumulusDiskCacheStorageSubdirectory
                )
        else:
            self.cumulusDiskCacheWantsDeletionOnTeardown = False
            self.cumulusDiskCacheStorageDir = config.cumulusDiskCacheStorageDir

        self._stopEvent = threading.Event()

        self._channelListener = channelListener
        assert len(self._channelListener.ports) == 2
        self._channelFactory = channelFactory

        Runtime.initialize()
        ModuleImporter.initialize()

        self.cumulusActiveMachines = CumulusActiveMachines.CumulusActiveMachines(
            self.viewFactory
            )

        self.cumulusChannelFactoryThread = ManagedThread.ManagedThread(
            target=self._channelListener.start
            )

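        # The VectorDataManager is sized from the configured vector-cache and
        # max-RAM-cache byte counts computed above.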
        self.vdm = VectorDataManager.constructVDM(
            callbackScheduler,
            self.cumulusVectorRamCacheSizeOverride,
            self.cumulusMaxRamCacheSizeOverride
            )

        if self.cumulusTrackTcmalloc:
            self.vdm.getMemoryManager().enableCountTcMallocMemoryAsEcMemory()

        self.persistentCacheIndex = CumulusNative.PersistentCacheIndex(
            viewFactory.createView(retrySeconds=10.0, numRetries=10),
            callbackScheduler
            )

        self.vdm.setPersistentCacheIndex(self.persistentCacheIndex)

        self.deleteCumulusDiskCacheIfNecessary()

        self.offlineCache = CumulusNative.DiskOfflineCache(
            callbackScheduler,
            self.cumulusDiskCacheStorageDir,
            config.cumulusDiskCacheStorageMB * 1024 * 1024,
            config.cumulusDiskCacheStorageFileCount
            )

        # If the "s3InterfaceFactory" is not in-memory, we use real out-of-process Python.
        # It would be better if this were more explicit.
        outOfProcess = self.s3InterfaceFactory is not None and self.s3InterfaceFactory.isCompatibleWithOutOfProcessDownloadPool

        self.outOfProcessPythonTasks = OutOfProcessPythonTasks.OutOfProcessPythonTasks(outOfProcess=outOfProcess)

        self.vdm.initializeOutOfProcessPythonTasks(self.outOfProcessPythonTasks.nativeTasks)

        checkpointInterval = config.cumulusCheckpointIntervalSeconds
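        # An interval of 0 appears to disable checkpointing (the 'None'
        # policy); otherwise checkpoint every checkpointInterval seconds.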
        if checkpointInterval == 0:
            checkpointPolicy = CumulusNative.CumulusCheckpointPolicy.None()
        else:
            checkpointPolicy = CumulusNative.CumulusCheckpointPolicy.Periodic(
                checkpointInterval,
                1024 * 1024
                )

        self.cumulusWorker = self.constructCumlusWorker(
            callbackScheduler,
            CumulusNative.CumulusWorkerConfiguration(
                self.machineId,
                self.cumulusThreadCountOverride,
                checkpointPolicy,
                ExecutionContext.createContextConfiguration(),
                diagnosticsDir or ""
                ),
            self.vdm,
            self.offlineCache,
            eventHandler
            )

        self.datasetLoadService = None
        if self.s3InterfaceFactory:
            externalDatasetChannel = self.cumulusWorker.getExternalDatasetRequestChannel(
                callbackScheduler
                )
            self.datasetLoadService = PythonIoTaskService.PythonIoTaskService(
                self.s3InterfaceFactory,
                self.objectStore,
                self.vdm,
                externalDatasetChannel.makeQueuelike(callbackScheduler)
                )

        self.cumulusWorker.startComputations()

        if self.datasetLoadService:
            self.datasetLoadService.startService()
Example #4
    def test_disk_read_and_write_perf(self):
        if os.getenv("CUMULUS_DATA_DIR") is None:
            dataDir = tempfile.mkdtemp()
        else:
            dataDir = os.getenv("CUMULUS_DATA_DIR")
        dataDir = os.path.join(dataDir, str(uuid.uuid4()))

        diskCache = CumulusNative.DiskOfflineCache(callbackScheduler, dataDir,
                                                   100 * 1024 * 1024 * 1024,
                                                   100000)

        try:
            fiftyMegabytes = ForaNative.encodeStringInSerializedObject(
                " " * 1024 * 1024 * 50)

            logging.info("Writing to %s", dataDir)

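            # A single 50 MB page keyed by the sha1 of the literal string
            # "pageId"; both size arguments to PageId are set to 50 MB here.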
            storedPageID = ForaNative.PageId(HashNative.Hash.sha1("pageId"),
                                             50 * 1024 * 1024,
                                             50 * 1024 * 1024)

            diskCache.store(storedPageID, fiftyMegabytes)

            t0 = time.time()

            TOTAL_SECONDS = 20.0

            totalReadBytes = [0]
            totalWriteBytes = [0]
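            # Single-element lists let the reader/writer closures below mutate
            # the totals (the surrounding code appears to target Python 2,
            # which lacks 'nonlocal').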

            def readerThread():
                while time.time() - t0 < TOTAL_SECONDS:
                    diskCache.loadIfExists(storedPageID)
                    totalReadBytes[0] += 50

            def writerThread():
                ix = 0
                while time.time() - t0 < TOTAL_SECONDS:
                    ix += 1
                    diskCache.store(
                        ForaNative.PageId(HashNative.Hash.sha1(str(ix)),
                                          50 * 1024 * 1024, 50 * 1024 * 1024),
                        fiftyMegabytes)
                    totalWriteBytes[0] += 50

            threads = [
                threading.Thread(target=readerThread),
                threading.Thread(target=writerThread)
            ]

            for t in threads:
                t.start()
            for t in threads:
                t.join()

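            # The totals are tracked in megabytes (50 per 50 MB page), so
            # 1024 / (MB per second) gives the seconds needed to move 1 GB.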
            PerformanceTestReporter.recordTest(
                "python.BigBox.Disk.ReadAndWrite.Write1GB",
                1024 / (totalWriteBytes[0] / (time.time() - t0)), None)

            PerformanceTestReporter.recordTest(
                "python.BigBox.Disk.ReadAndWrite.Read1GB",
                1024 / (totalReadBytes[0] / (time.time() - t0)), None)

        finally:
            shutil.rmtree(dataDir)
Example #5
    def __init__(self, ownAddress, channelListener, channelFactory,
                 eventHandler, callbackScheduler, diagnosticsDir, config,
                 viewFactory):
        Stoppable.Stoppable.__init__(self)

        #acquire a machineId randomly, using uuid
        self.machineId = CumulusNative.MachineId(
            Hash.Hash.sha1(str(uuid.uuid4())))

        self.ownAddress = ownAddress
        self.callbackScheduler = callbackScheduler
        self.viewFactory = viewFactory
        self.threadsStarted_ = False
        self.connectedMachines = set()
        self.connectingMachines = set()  # machines we are in the process of connecting to
        self.droppedMachineIds = set()
        self.lock = threading.RLock()
        self.cumulusMaxRamCacheSizeOverride = config.cumulusMaxRamCacheMB * 1024 * 1024
        self.cumulusVectorRamCacheSizeOverride = config.cumulusVectorRamCacheMB * 1024 * 1024
        self.cumulusThreadCountOverride = config.cumulusServiceThreadCount
        self.cumulusTrackTcMalloc = config.cumulusTrackTcmalloc

        self.reconnectPersistentCacheIndexViewThreads = []

        if config.cumulusDiskCacheStorageSubdirectory is not None:
            self.cumulusDiskCacheWantsDeletionOnTeardown = True
            self.cumulusDiskCacheStorageDir = os.path.join(
                config.cumulusDiskCacheStorageDir,
                config.cumulusDiskCacheStorageSubdirectory)
        else:
            self.cumulusDiskCacheWantsDeletionOnTeardown = False
            self.cumulusDiskCacheStorageDir = config.cumulusDiskCacheStorageDir

        logging.info(
            "Creating a CumulusService with ram cache of %s / %s MB and %s threads",
            self.cumulusVectorRamCacheSizeOverride / 1024.0 / 1024.0,
            self.cumulusMaxRamCacheSizeOverride / 1024.0 / 1024.0,
            self.cumulusThreadCountOverride)

        self._stopEvent = threading.Event()

        self._channelListener = channelListener
        assert len(self._channelListener.ports) == 2
        self._channelFactory = channelFactory

        Runtime.initialize()
        ModuleImporter.initialize()

        self.cumulusActiveMachines = CumulusActiveMachines.CumulusActiveMachines(
            self.viewFactory)

        self.cumulusChannelFactoryThread = ManagedThread.ManagedThread(
            target=self._channelListener.start)

        self.vdm = VectorDataManager.constructVDM(
            callbackScheduler, self.cumulusVectorRamCacheSizeOverride,
            self.cumulusMaxRamCacheSizeOverride)

        if self.cumulusTrackTcMalloc:
            logging.info(
                "CumulusService enabling track-tc-malloc memory with a max cache of %s MB",
                self.cumulusMaxRamCacheSizeOverride / 1024 / 1024.0)
            self.vdm.getMemoryManager().enableCountTcMallocMemoryAsEcMemory()

        self.persistentCacheIndex = CumulusNative.PersistentCacheIndex(
            viewFactory.createView(retrySeconds=10.0, numRetries=10),
            callbackScheduler)

        self.vdm.setPersistentCacheIndex(self.persistentCacheIndex)

        self.deleteCumulusDiskCacheIfNecessary()

        self.offlineCache = CumulusNative.DiskOfflineCache(
            callbackScheduler, self.cumulusDiskCacheStorageDir,
            config.cumulusDiskCacheStorageMB * 1024 * 1024,
            config.cumulusDiskCacheStorageFileCount)

        checkpointInterval = config.cumulusCheckpointIntervalSeconds
        if checkpointInterval == 0:
            checkpointPolicy = CumulusNative.CumulusCheckpointPolicy.None()
        else:
            checkpointPolicy = CumulusNative.CumulusCheckpointPolicy.Periodic(
                checkpointInterval, 1024 * 1024)

        self.cumulusWorker = self.constructCumlusWorker(
            callbackScheduler,
            CumulusNative.CumulusWorkerConfiguration(
                self.machineId,
                self.cumulusThreadCountOverride,
                checkpointPolicy,
                ExecutionContext.createContextConfiguration(),
                diagnosticsDir or ""
                ),
            self.vdm,
            self.offlineCache,
            eventHandler
            )

        #externalDatasetChannel = self.cumulusWorker.getExternalDatasetRequestChannel(
        #callbackScheduler
        #)
        #self.datasetLoadService = PythonIoTaskService.PythonIoTaskService(
        #settings.s3InterfaceFactory,
        #settings.objectStore,
        #self.vdm,
        #externalDatasetChannel.makeQueuelike(callbackScheduler)
        #)

        self.cumulusWorker.startComputations()