Exemple #1
0
def createWorker(machineId,
                 viewFactory,
                 callbackSchedulerToUse=None,
                 threadCount=2,
                 memoryLimitMb=100):
    """Create a CumulusWorker wired for an in-memory test cluster.

    Args:
        machineId: machine identity handed to the worker configuration.
        viewFactory: shared-state view factory; a view is created for the
            persistent cache index.
        callbackSchedulerToUse: scheduler to run callbacks on; defaults to
            the process-wide testing singleton.
        threadCount: number of computation threads for the worker.
        memoryLimitMb: soft VDM memory limit in megabytes.

    Returns:
        (worker, vdm, eventHandler) tuple.
    """
    if callbackSchedulerToUse is None:
        callbackSchedulerToUse = CallbackScheduler.singletonForTesting()

    # 5 MB page size for the vector data manager.
    vdm = ForaNative.VectorDataManager(callbackSchedulerToUse, 5 * 1024 * 1024)

    # Hard ceiling is the smaller of 125% of the limit and limit + 2 GB.
    vdm.setMemoryLimit(
        int(memoryLimitMb * 1024 * 1024),
        min(int(memoryLimitMb * 1.25 * 1024 * 1024),
            int((memoryLimitMb + 1024 * 2) * 1024 * 1024)))

    vdm.setPersistentCacheIndex(
        CumulusNative.PersistentCacheIndex(viewFactory.createView(),
                                           callbackSchedulerToUse))

    cache = CumulusNative.SimpleOfflineCache(callbackSchedulerToUse,
                                             1000 * 1024 * 1024)

    eventHandler = CumulusNative.CumulusWorkerHoldEventsInMemoryEventHandler()

    # 'None' is a reserved word in Python 3, so the original attribute access
    # 'CumulusCheckpointPolicy.None()' is a SyntaxError there; getattr keeps
    # identical behavior while parsing on both Python 2 and 3.
    checkpointPolicy = getattr(CumulusNative.CumulusCheckpointPolicy, "None")()

    return (CumulusNative.CumulusWorker(
        callbackSchedulerToUse,
        CumulusNative.CumulusWorkerConfiguration(
            machineId, threadCount,
            checkpointPolicy,
            ExecutionContext.createContextConfiguration(), ""), vdm, cache,
        eventHandler), vdm, eventHandler)
Exemple #2
0
 def setUp(self):
     """Bind the shared FORA runtime objects used by every test in this suite."""
     self.callbackScheduler = CallbackScheduler.singletonForTesting()
     self.runtime = Runtime.getMainRuntime()
     self.axioms = self.runtime.getAxioms()
     self.compiler = self.runtime.getTypedForaCompiler()
     # Judgment-on-value constant wrapping the builtin module's implVal.
     self.builtinsAsJOV = FORANative.JudgmentOnValue.Constant(
         FORA.builtin().implVal_)
    def __init__(self, numRandVals, numRelaxations, maxForRelax, maxForRand, testAxiomsPath, seed):
        """Configure a randomized axiom tester.

        Args:
            numRandVals: number of random values to generate per signature.
            numRelaxations: number of judgment relaxations to try.
            maxForRelax: bound used when relaxing judgments.
            maxForRand: bound used when generating random values.
            testAxiomsPath: path to an axiom-signature file, or None to use
                the default UNIT_TEST_AXIOMS_PATH.
            seed: seed for numpy's global RNG (reproducible runs).
        """
        object.__init__(self)
        self.callbackScheduler = CallbackScheduler.singletonForTesting()
        self.callbackSchedulerFactory = self.callbackScheduler.getFactory()

        self.numRandVals = numRandVals
        self.numRelaxations = numRelaxations
        self.maxForRelax = maxForRelax
        self.maxForRand = maxForRand
        self.seed = seed

        self.runtime = Runtime.getMainRuntime()
        self.axioms = self.runtime.getAxioms()
        self.typed_fora_compiler = self.runtime.getTypedForaCompiler()

        # Fall back to the default signature file when none is supplied.
        if testAxiomsPath is not None:
            pathToUse = testAxiomsPath
        else:
            pathToUse = UNIT_TEST_AXIOMS_PATH

        self.axiom_signatures_to_test = self.loadAxiomSignaturesFromFile(pathToUse)

        # Materialize every axiom group up front for random sampling.
        self.axiom_groups = []
        for i in range(self.axioms.axiomCount):
            self.axiom_groups.append(self.axioms.getAxiomGroupByIndex(i))

        self.symbol_strings = self.loadSymbolStrings()

        # Seed numpy's *global* RNG so randomized axiom tests are reproducible.
        numpy.random.seed(seed)
Exemple #4
0
def createWorkersAndClients(workerCount,
                            clientCount,
                            viewFactory=None,
                            memoryLimitMb=100,
                            threadCount=2,
                            callbackSchedulerToUse=None):
    """Build a fully meshed in-memory Cumulus cluster.

    Creates workerCount workers and clientCount clients, starts computations
    on every worker, then connects every worker pair and every worker/client
    pair with two in-memory string channels each.

    Returns:
        (workersVdmsAndEventHandlers, clientsAndVdms, viewFactory)
    """
    if callbackSchedulerToUse is None:
        callbackSchedulerToUse = CallbackScheduler.singletonForTesting()

    if viewFactory is None:
        viewFactory = createInMemorySharedStateViewFactory(
            callbackSchedulerToUse)

    workersVdmsAndEventHandlers = []
    for workerIx in range(workerCount):
        workersVdmsAndEventHandlers.append(
            createWorker(machineId(workerIx),
                         viewFactory,
                         memoryLimitMb=memoryLimitMb,
                         threadCount=threadCount,
                         callbackSchedulerToUse=callbackSchedulerToUse))

    clientsAndVdms = []
    for clientIx in range(clientCount):
        clientsAndVdms.append(
            createClient(clientId(clientIx),
                         callbackSchedulerToUse=callbackSchedulerToUse))

    # Start computation threads before wiring up the mesh.
    for workerTuple in workersVdmsAndEventHandlers:
        workerTuple[0].startComputations()

    # Connect every unordered pair of workers with two channels.
    workerTotal = len(workersVdmsAndEventHandlers)
    for ix1 in range(workerTotal - 1):
        for ix2 in range(ix1 + 1, workerTotal):
            leftEnd1, rightEnd1 = StringChannelNative.InMemoryStringChannel(
                callbackSchedulerToUse)
            leftEnd2, rightEnd2 = StringChannelNative.InMemoryStringChannel(
                callbackSchedulerToUse)
            workersVdmsAndEventHandlers[ix1][0].addMachine(
                machineId(ix2), [leftEnd1, leftEnd2],
                ForaNative.ImplValContainer(), callbackSchedulerToUse)
            workersVdmsAndEventHandlers[ix2][0].addMachine(
                machineId(ix1), [rightEnd1, rightEnd2],
                ForaNative.ImplValContainer(), callbackSchedulerToUse)

    # Connect every worker to every client the same way.
    for workerIx, workerTuple in enumerate(workersVdmsAndEventHandlers):
        for clientIx, clientTuple in enumerate(clientsAndVdms):
            workerEnd1, clientEnd1 = StringChannelNative.InMemoryStringChannel(
                callbackSchedulerToUse)
            workerEnd2, clientEnd2 = StringChannelNative.InMemoryStringChannel(
                callbackSchedulerToUse)
            workerTuple[0].addCumulusClient(
                clientId(clientIx), [workerEnd1, workerEnd2],
                ForaNative.ImplValContainer(), callbackSchedulerToUse)
            clientTuple[0].addMachine(machineId(workerIx),
                                      [clientEnd1, clientEnd2],
                                      ForaNative.ImplValContainer(),
                                      callbackSchedulerToUse)

    return workersVdmsAndEventHandlers, clientsAndVdms, viewFactory
Exemple #5
0
def createClient(clientId, callbackSchedulerToUse=None):
    """Create a CumulusClient plus its VectorDataManager.

    Returns:
        (client, vdm) tuple. The VDM uses a 5 MB page size with a
        100 MB soft / 125 MB hard memory limit.
    """
    scheduler = callbackSchedulerToUse
    if scheduler is None:
        scheduler = CallbackScheduler.singletonForTesting()

    vdm = ForaNative.VectorDataManager(scheduler, 5 * 1024 * 1024)
    vdm.setMemoryLimit(100 * 1024 * 1024, 125 * 1024 * 1024)

    client = CumulusNative.CumulusClient(vdm, clientId, scheduler)
    return (client, vdm)
def createWorker_(machineId,
                  viewFactory,
                  callbackSchedulerToUse,
                  threadCount,
                  memoryLimitMb,
                  cacheFunction,
                  pageSizeOverride,
                  disableEventHandler):
    """Create a CumulusWorker with explicit cache / page-size / event control.

    Args:
        machineId: machine identity for the worker configuration.
        viewFactory: shared-state view factory for the persistent cache index.
        callbackSchedulerToUse: callback scheduler, or None for the testing
            singleton.
        threadCount: number of computation threads.
        memoryLimitMb: soft VDM memory limit in megabytes.
        cacheFunction: zero-argument callable producing the offline cache.
        pageSizeOverride: explicit VDM page size in bytes, or None to scale
            with the memory budget.
        disableEventHandler: if True, drop worker events instead of holding
            them in memory.

    Returns:
        (worker, vdm, eventHandler) tuple.
    """
    if callbackSchedulerToUse is None:
        callbackSchedulerToUse = CallbackScheduler.singletonForTesting()

    # Page size scales with the memory budget unless explicitly overridden:
    # 1 MB below 1 GB, 5 MB below 5 GB, 50 MB otherwise.
    vdm = ForaNative.VectorDataManager(
        callbackSchedulerToUse,
        pageSizeOverride if pageSizeOverride is not None else
        1 * 1024 * 1024 if memoryLimitMb < 1000 else
        5 * 1024 * 1024 if memoryLimitMb < 5000 else
        50 * 1024 * 1024
        )

    # Hard ceiling is the smaller of 125% of the limit and limit + 2 GB.
    vdm.setMemoryLimit(
        int(memoryLimitMb * 1024 * 1024),
        min(int(memoryLimitMb * 1.25 * 1024 * 1024),
            int((memoryLimitMb + 1024 * 2) * 1024 * 1024))
        )

    vdm.setPersistentCacheIndex(
        CumulusNative.PersistentCacheIndex(
            viewFactory.createView(),
            callbackSchedulerToUse
            )
        )

    cache = cacheFunction()

    if disableEventHandler:
        eventHandler = CumulusNative.CumulusWorkerIgnoreEventHandler()
    else:
        eventHandler = CumulusNative.CumulusWorkerHoldEventsInMemoryEventHandler()

    # 'None' is a reserved word in Python 3, so the original attribute access
    # 'CumulusCheckpointPolicy.None()' is a SyntaxError there; getattr keeps
    # identical behavior while parsing on both Python 2 and 3.
    checkpointPolicy = getattr(CumulusNative.CumulusCheckpointPolicy, "None")()

    return (
        CumulusNative.CumulusWorker(
            callbackSchedulerToUse,
            CumulusNative.CumulusWorkerConfiguration(
                machineId,
                threadCount,
                checkpointPolicy,
                ExecutionContext.createContextConfiguration(),
                ""
                ),
            vdm,
            cache,
            eventHandler
            ),
        vdm,
        eventHandler
        )
def createClient_(clientId, callbackSchedulerToUse = None):
    """Create a CumulusClient plus its VectorDataManager; returns (client, vdm)."""
    scheduler = (callbackSchedulerToUse
                 if callbackSchedulerToUse is not None
                 else CallbackScheduler.singletonForTesting())

    # 5 MB pages; 100 MB soft / 125 MB hard memory limit.
    vdm = ForaNative.VectorDataManager(scheduler, 5 * 1024 * 1024)
    vdm.setMemoryLimit(100 * 1024 * 1024, 125 * 1024 * 1024)

    return (
        CumulusNative.CumulusClient(vdm, clientId, scheduler),
        vdm
        )
Exemple #8
0
    def test_roundtrip_conversion_simple(self):
        """Round-trip a sampling of primitive Python values through conversion
        and assert each comes back equal to the original."""
        vdm = FORANative.VectorDataManager(
            CallbackScheduler.singletonForTesting(), 10000000)

        # Numbers, strings, None, bools, plus nested lists/tuples/dicts.
        for obj in [
                10, 10.0, "asdf", None, False, True, [], (), [1, 2], [1, [1]],
            (1, 2), (1, 2, []), {
                1: 2
            }
        ]:
            # Third argument is the assertion message (the failing value).
            self.assertEqual(roundtripConvert(obj, vdm)[0], obj, obj)
Exemple #9
0
    def setUp(self):
        """Install a LocalEvaluator backed by a SimpleOfflineCache, saving the
        evaluator it replaces so tearDown can restore it."""
        self.callbackScheduler = CallbackScheduler.singletonForTesting()

        def buildOfflineCache(vdm):
            # Keep a handle on the cache so tests can inspect it later.
            self.simpleOfflineCache = CumulusNative.SimpleOfflineCache(
                self.callbackScheduler, 1000000000)
            return self.simpleOfflineCache

        self.evaluator = LocalEvaluator.LocalEvaluator(
            buildOfflineCache, 2000000, maxPageSizeInBytes=100000)

        self.oldEvaluator = Evaluator.swapEvaluator(self.evaluator)
Exemple #10
0
    def setUp(self):
        """Bind runtime, axioms, compiler, and a SimpleForwardReasoner for the tests."""
        self.callbackScheduler = CallbackScheduler.singletonForTesting()
        self.runtime = Runtime.getMainRuntime()
        self.axioms = self.runtime.getAxioms()
        self.compiler = self.runtime.getTypedForaCompiler()
        self.builtinsAsJOV = FORANative.JudgmentOnValue.Constant(FORA.builtin().implVal_)

        # Judgment constant for the purePython support module shipped with pyfora.
        pyforaPath = os.path.join(os.path.split(pyfora.__file__)[0], "fora/purePython")
        self.purePythonAsJOV = FORANative.JudgmentOnValue.Constant(FORA.importModule(pyforaPath).implVal_)

        self.instructionGraph = self.runtime.getInstructionGraph()
        self.reasoner = FORANative.SimpleForwardReasoner(self.compiler, self.instructionGraph, self.axioms)
    def setUp(self):
        """Bind runtime objects and build the forward reasoner under test.

        NOTE(review): duplicate of the preceding setUp in this capture.
        """
        self.callbackScheduler = CallbackScheduler.singletonForTesting()
        self.runtime = Runtime.getMainRuntime()
        self.axioms = self.runtime.getAxioms()
        self.compiler = self.runtime.getTypedForaCompiler()
        self.builtinsAsJOV = FORANative.JudgmentOnValue.Constant(FORA.builtin().implVal_)

        # Judgment constant for the purePython support module shipped with pyfora.
        pyforaPath = os.path.join(os.path.split(pyfora.__file__)[0], "fora/purePython")
        self.purePythonAsJOV = FORANative.JudgmentOnValue.Constant(FORA.importModule(pyforaPath).implVal_)

        self.instructionGraph = self.runtime.getInstructionGraph()
        self.reasoner = FORANative.SimpleForwardReasoner(self.compiler, self.instructionGraph, self.axioms)
Exemple #12
0
    def __init__(self,
                 inMemory,
                 port=None,
                 cachePathOverride='',
                 maxOpenFiles=256,
                 inMemChannelFactoryFactory=None,
                 maxLogFileSizeMb=10,
                 pingInterval=None):
        """Shared-state harness: either an in-memory KeyspaceManager or a real
        TCP-backed SharedStateService, exposing self.viewFactory in both modes.

        Args:
            inMemory: if True, build an in-process KeyspaceManager; otherwise
                start a SharedStateService listening on `port`.
            port: TCP port for the service mode; required when inMemory is False.
            cachePathOverride: cache path forwarded to the manager/service.
            maxOpenFiles: file-handle cap for the in-memory manager.
            inMemChannelFactoryFactory: optional channel-factory factory; the
                default is InMemoryChannelFactory.InMemoryChannelFactory.
            maxLogFileSizeMb: log rotation size for the in-memory manager.
            pingInterval: ping interval, or None for the harness default.
        """
        self.inMemory = inMemory
        self.manager = None
        self.callbackScheduler = CallbackScheduler.singletonForTesting()

        if self.inMemory:
            self.manager = SharedStateService.KeyspaceManager(
                10001,
                1,
                cachePathOverride=cachePathOverride,
                pingInterval=IN_MEMORY_HARNESS_PING_INTERVAL
                if pingInterval is None else pingInterval,
                maxOpenFiles=maxOpenFiles,
                maxLogFileSizeMb=maxLogFileSizeMb)

            #although named otherwise InMemoryChannelFactory is actually a factory for a channelFactory
            # or a channelFactoryFactory

            channelFactoryFactory = inMemChannelFactoryFactory if inMemChannelFactoryFactory is not None \
                    else InMemoryChannelFactory.InMemoryChannelFactory

            logging.info(channelFactoryFactory)
            self.channelFactory = channelFactoryFactory(
                self.callbackScheduler, self.manager)
            self.viewFactory = ViewFactory.ViewFactory(self.channelFactory)
        else:

            # NOTE(review): Settings is defined but never referenced in this
            # block — looks vestigial; confirm before removing.
            class Settings(object):
                callbackScheduler = self.callbackScheduler

            assert port is not None

            self.service = SharedStateService.SharedStateService(
                self.callbackScheduler,
                cachePathOverride=cachePathOverride,
                port=port)

            self.service.startService()
            self.service.blockUntilListening()

            self.viewFactory = ViewFactory.ViewFactory.TcpViewFactory(
                self.callbackScheduler, "localhost", port)
def createWorkersAndClients(
            workerCount,
            clientCount,
            viewFactory = None,
            memoryLimitMb = 100,
            threadCount = 2,
            callbackSchedulerToUse = None
            ):
    """Create a fully connected in-memory cluster of workers and clients.

    Returns:
        (workersVdmsAndEventHandlers, clientsAndVdms, viewFactory)
    """
    scheduler = callbackSchedulerToUse
    if scheduler is None:
        scheduler = CallbackScheduler.singletonForTesting()

    if viewFactory is None:
        viewFactory = createInMemorySharedStateViewFactory(scheduler)

    workersVdmsAndEventHandlers = [
        createWorker(
            machineId(workerIx),
            viewFactory,
            memoryLimitMb=memoryLimitMb,
            threadCount=threadCount,
            callbackSchedulerToUse=scheduler
            )
        for workerIx in range(workerCount)
        ]

    clientsAndVdms = [
        createClient(clientId(clientIx), callbackSchedulerToUse=scheduler)
        for clientIx in range(clientCount)
        ]

    # Start computation threads on every worker before wiring the mesh.
    for workerTuple in workersVdmsAndEventHandlers:
        workerTuple[0].startComputations()

    # Two in-memory string channels between every unordered pair of workers.
    workerTotal = len(workersVdmsAndEventHandlers)
    for ix1 in range(workerTotal - 1):
        for ix2 in range(ix1 + 1, workerTotal):
            leftEnd1, rightEnd1 = StringChannelNative.InMemoryStringChannel(scheduler)
            leftEnd2, rightEnd2 = StringChannelNative.InMemoryStringChannel(scheduler)
            workersVdmsAndEventHandlers[ix1][0].addMachine(
                machineId(ix2), [leftEnd1, leftEnd2],
                ForaNative.ImplValContainer(), scheduler)
            workersVdmsAndEventHandlers[ix2][0].addMachine(
                machineId(ix1), [rightEnd1, rightEnd2],
                ForaNative.ImplValContainer(), scheduler)

    # And two channels between every worker/client pair.
    for workerIx, workerTuple in enumerate(workersVdmsAndEventHandlers):
        for clientIx, clientTuple in enumerate(clientsAndVdms):
            workerEnd1, clientEnd1 = StringChannelNative.InMemoryStringChannel(scheduler)
            workerEnd2, clientEnd2 = StringChannelNative.InMemoryStringChannel(scheduler)
            workerTuple[0].addCumulusClient(
                clientId(clientIx), [workerEnd1, workerEnd2],
                ForaNative.ImplValContainer(), scheduler)
            clientTuple[0].addMachine(
                machineId(workerIx), [clientEnd1, clientEnd2],
                ForaNative.ImplValContainer(), scheduler)

    return workersVdmsAndEventHandlers, clientsAndVdms, viewFactory
    def setUp(self):
        """Install a LocalEvaluator backed by a SimpleOfflineCache, saving the
        evaluator it replaces so tearDown can restore it."""
        self.callbackScheduler = CallbackScheduler.singletonForTesting()

        def createStorage(vdm):
            # Keep the cache on self so tests can inspect it later.
            self.simpleOfflineCache = CumulusNative.SimpleOfflineCache(self.callbackScheduler, 1000000000)
            return self.simpleOfflineCache

        self.evaluator = LocalEvaluator.LocalEvaluator(
            createStorage,
            2000000,
            maxPageSizeInBytes = 100000
            )

        self.oldEvaluator = Evaluator.swapEvaluator(self.evaluator)
Exemple #15
0
    def __init__(self,
            inMemory,
            port = None,
            cachePathOverride = '',
            maxOpenFiles = 256,
            inMemChannelFactoryFactory = None,
            maxLogFileSizeMb = 10,
            pingInterval = None):
        """Shared-state harness: in-memory KeyspaceManager or TCP-backed
        SharedStateService; exposes self.viewFactory in both modes.

        NOTE(review): duplicate of the earlier harness __init__ in this capture.
        """
        self.inMemory = inMemory
        self.manager = None
        self.callbackScheduler = CallbackScheduler.singletonForTesting()

        if self.inMemory:
            self.manager = SharedStateService.KeyspaceManager(
                10001,
                1,
                cachePathOverride=cachePathOverride,
                pingInterval = IN_MEMORY_HARNESS_PING_INTERVAL if pingInterval is None else pingInterval,
                maxOpenFiles=maxOpenFiles,
                maxLogFileSizeMb=maxLogFileSizeMb
                )

            #although named otherwise InMemoryChannelFactory is actually a factory for a channelFactory
            # or a channelFactoryFactory

            channelFactoryFactory = inMemChannelFactoryFactory if inMemChannelFactoryFactory is not None \
                    else InMemoryChannelFactory.InMemoryChannelFactory

            logging.info(channelFactoryFactory)
            self.channelFactory = channelFactoryFactory(self.callbackScheduler, self.manager)
            self.viewFactory = ViewFactory.ViewFactory(self.channelFactory)
        else:
            # NOTE(review): Settings is defined but never referenced here —
            # looks vestigial; confirm before removing.
            class Settings(object):
                callbackScheduler = self.callbackScheduler

            assert port is not None

            self.service = SharedStateService.SharedStateService(
                    self.callbackScheduler,
                    cachePathOverride=cachePathOverride,
                    port=port
                    )

            self.service.startService()
            self.service.blockUntilListening()

            self.viewFactory = ViewFactory.ViewFactory.TcpViewFactory(self.callbackScheduler, "localhost", port)
Exemple #16
0
def createInMemorySharedStateViewFactory(callbackSchedulerToUse=None):
    """Build a ViewFactory backed by an in-memory SharedState keyspace manager."""
    scheduler = callbackSchedulerToUse
    if scheduler is None:
        scheduler = CallbackScheduler.singletonForTesting()

    keyspaceManager = SharedStateService.KeyspaceManager(
        10001,
        1,
        cachePathOverride="",
        pingInterval=IN_MEMORY_CLUSTER_SS_PING_INTERVAL,
        maxOpenFiles=100)

    # SharedState traffic runs on its own single-threaded scheduler.
    sharedStateScheduler = scheduler.getFactory().createScheduler(
        "SharedState", 1)
    channelFactory = InMemorySharedStateChannelFactory.InMemoryChannelFactory(
        sharedStateScheduler, keyspaceManager)

    return ViewFactory.ViewFactory(channelFactory)
Exemple #17
0
    def setUp(self):
        """Load axiom-JOA test values and install a LocalEvaluator for the suite."""
        self.callbackScheduler = CallbackScheduler.singletonForTesting()
        self.runtime = Runtime.getMainRuntime()
        self.axioms = self.runtime.getAxioms()
        self.native_runtime = self.runtime.getTypedForaCompiler()
        # Test vectors live next to this file.
        self.vals_to_test = self.loadValuesFromFile(
            os.path.join(os.path.split(__file__)[0], "AxiomJOA_test.txt"))

        self.evaluator = LocalEvaluator.LocalEvaluator(
            lambda vdm: CumulusNative.SimpleOfflineCache(
                self.callbackScheduler, 1000000000),
            10000000,
            maxPageSizeInBytes=100000)
        # Remember the evaluator we displaced so tearDown can restore it.
        self.oldEvaluator = Evaluator.swapEvaluator(self.evaluator)

        self.knownModulesAsConstantJOVs = dict()
        self.knownModulesAsConstantJOVs["builtin"] = \
                FORANative.JudgmentOnValue.Constant(FORA.builtin().implVal_)
Exemple #18
0
    def setUp(self):
        """Load axiom-JOA test values and install a LocalEvaluator.

        NOTE(review): duplicate of the preceding setUp in this capture.
        """
        self.callbackScheduler = CallbackScheduler.singletonForTesting()
        self.runtime = Runtime.getMainRuntime()
        self.axioms = self.runtime.getAxioms()
        self.native_runtime = self.runtime.getTypedForaCompiler()
        # Test vectors live next to this file.
        self.vals_to_test = self.loadValuesFromFile(os.path.join(os.path.split(__file__)[0],
                                                        "AxiomJOA_test.txt"))

        self.evaluator = LocalEvaluator.LocalEvaluator(
                            lambda vdm: CumulusNative.SimpleOfflineCache(self.callbackScheduler, 1000000000),
                            10000000,
                            maxPageSizeInBytes = 100000
                            )
        # Remember the evaluator we displaced so tearDown can restore it.
        self.oldEvaluator = Evaluator.swapEvaluator(self.evaluator)

        self.knownModulesAsConstantJOVs = dict()
        self.knownModulesAsConstantJOVs["builtin"] = \
                FORANative.JudgmentOnValue.Constant(FORA.builtin().implVal_)
Exemple #19
0
    def test_roundtrip_convert_function(self):
        """Round-trip a module-level function, class, and instance; functions
        and classes must come back as the *same* objects, instances as the
        same type."""
        vdm = FORANative.VectorDataManager(
            CallbackScheduler.singletonForTesting(), 10000000)

        self.assertTrue(
            roundtripConvert(
                ThisIsAFunction, vdm, allowUserCodeModuleLevelLookups=True)[0]
            is ThisIsAFunction)
        self.assertTrue(
            roundtripConvert(
                ThisIsAClass, vdm, allowUserCodeModuleLevelLookups=True)[0] is
            ThisIsAClass)
        # Instances can't be identity-preserved; type preservation suffices.
        self.assertTrue(
            isinstance(
                roundtripConvert(ThisIsAClass(),
                                 vdm,
                                 allowUserCodeModuleLevelLookups=True)[0],
                ThisIsAClass))
def createInMemorySharedStateViewFactory(callbackSchedulerToUse = None):
    """Build a ViewFactory over an in-memory SharedState keyspace manager."""
    if callbackSchedulerToUse is None:
        callbackSchedulerToUse = CallbackScheduler.singletonForTesting()

    manager = SharedStateService.KeyspaceManager(
        10001,
        1,
        cachePathOverride="",
        pingInterval=IN_MEMORY_CLUSTER_SS_PING_INTERVAL,
        maxOpenFiles=100
        )

    # SharedState gets a dedicated single-threaded scheduler.
    ssScheduler = callbackSchedulerToUse.getFactory().createScheduler("SharedState", 1)
    channelFactory = InMemorySharedStateChannelFactory.InMemoryChannelFactory(
        ssScheduler,
        manager
        )

    return ViewFactory.ViewFactory(channelFactory)
def createWorker(machineId, viewFactory, callbackSchedulerToUse = None, threadCount = 2, memoryLimitMb = 100):
    """Create a CumulusWorker for an in-memory test cluster.

    Returns:
        (worker, vdm, eventHandler) tuple.
    """
    if callbackSchedulerToUse is None:
        callbackSchedulerToUse = CallbackScheduler.singletonForTesting()

    # 5 MB VDM page size; hard memory ceiling is the smaller of 125% of the
    # limit and limit + 2 GB.
    vdm = ForaNative.VectorDataManager(callbackSchedulerToUse, 5 * 1024 * 1024)
    vdm.setMemoryLimit(
        int(memoryLimitMb * 1024 * 1024),
        min(int(memoryLimitMb * 1.25 * 1024 * 1024),
            int((memoryLimitMb + 1024 * 2) * 1024 * 1024))
        )

    vdm.setPersistentCacheIndex(
        CumulusNative.PersistentCacheIndex(
            viewFactory.createView(),
            callbackSchedulerToUse
            )
        )

    cache = CumulusNative.SimpleOfflineCache(callbackSchedulerToUse, 1000 * 1024 * 1024)

    eventHandler = CumulusNative.CumulusWorkerHoldEventsInMemoryEventHandler()

    # 'None' is a reserved word in Python 3, so the original attribute access
    # 'CumulusCheckpointPolicy.None()' is a SyntaxError there; getattr keeps
    # identical behavior while parsing on both Python 2 and 3.
    checkpointPolicy = getattr(CumulusNative.CumulusCheckpointPolicy, "None")()

    return (
        CumulusNative.CumulusWorker(
            callbackSchedulerToUse,
            CumulusNative.CumulusWorkerConfiguration(
                machineId,
                threadCount,
                checkpointPolicy,
                ExecutionContext.createContextConfiguration(),
                ""
                ),
            vdm,
            cache,
            eventHandler
            ),
        vdm,
        eventHandler
        )
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import unittest
import time
import ufora.FORA.python.FORA as FORA
import ufora.cumulus.test.InMemoryCumulusSimulation as InMemoryCumulusSimulation
import ufora.distributed.S3.InMemoryS3Interface as InMemoryS3Interface
import ufora.native.CallbackScheduler as CallbackScheduler
import ufora.test.PerformanceTestReporter as PerformanceTestReporter
import ufora.FORA.python.Runtime as Runtime

# Module-level scheduler shared by every test in this file.
callbackScheduler = CallbackScheduler.singletonForTesting()

class BigboxStringPerformanceTest(unittest.TestCase):
    def computeUsingSeveralWorkers(self, *args, **kwds):
        """Thin pass-through to InMemoryCumulusSimulation.computeUsingSeveralWorkers."""
        return InMemoryCumulusSimulation.computeUsingSeveralWorkers(*args, **kwds)

    def stringCreationAndSumTest(self, totalStrings, workers, threadsPerWorker, testName):
        s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()

        #we wish we could actually test that we achieve saturation here but we can't yet.
        text = """Vector.range(%s, String).sum(size)""" % totalStrings

        t0 = time.time()

        _, simulation = \
            self.computeUsingSeveralWorkers(
Exemple #23
0
import logging
import traceback
import ufora.native.Cumulus as CumulusNative
import ufora.FORA.python.FORA as FORA
import ufora.cumulus.test.InMemoryCumulusSimulation as InMemoryCumulusSimulation
import ufora.distributed.S3.InMemoryS3Interface as InMemoryS3Interface
import ufora.distributed.S3.ActualS3Interface as ActualS3Interface
import ufora.native.CallbackScheduler as CallbackScheduler
import ufora.test.PerformanceTestReporter as PerformanceTestReporter
import ufora.FORA.python.Runtime as Runtime
import uuid
import os
import ufora.distributed.Storage.S3ObjectStore as S3ObjectStore
import ufora.config.Setup as Setup

# Module-level scheduler shared by every test in this file.
callbackScheduler = CallbackScheduler.singletonForTesting()


class BigboxPerformanceTest(unittest.TestCase):
    def getTestDataBucket(self):
        """Resolve the S3 test-data bucket name, suffixed with the AWS region
        when AWS_AVAILABILITY_ZONE is set in the environment.

        NOTE(review): as captured here the method never returns bucketName —
        this looks truncated; confirm against the original source.
        """
        aws_az_key = 'AWS_AVAILABILITY_ZONE'
        bucketName = 'ufora-test-data'
        if aws_az_key in os.environ:
            az = os.environ[aws_az_key]
            # Was `az is not ''`: an identity comparison against a literal,
            # which is implementation-dependent (and a SyntaxWarning on
            # CPython >= 3.8). Equality is what was meant.
            if az != '':
                # Region is the AZ minus its trailing zone letter, e.g.
                # 'us-east-1a' -> 'us-east-1'.
                region = az[:-1]
                bucketName += '-' + region
                logging.info("Resolved az: %s, region: %s", az, region)
            else:
                logging.info("No availability zone resolved")
def computeUsingSeveralWorkers(expressionText,
                               s3Service,
                               count,
                               objectStore=None,
                               wantsStats=False,
                               timeout=10,
                               returnEverything=False,
                               memoryLimitMb=100,
                               blockUntilConnected=False,
                               keepSimulationAlive=False,
                               sharedStateViewFactory=None,
                               threadCount=2):
    """Run a FORA expression on a freshly built in-memory cluster of `count`
    workers and one client.

    Args:
        expressionText: FORA source text, or an already-built
            CumulusNative.ComputationDefinition.
        s3Service: S3 interface factory; each worker gets a per-machine view.
        count: number of workers.
        objectStore: object store for IO tasks; defaults to an S3ObjectStore
            over the configured user-data bucket.
        wantsStats: forward stats from waitForResult (non-returnEverything path).
        timeout: seconds to wait for the computation result.
        returnEverything: if True, return the whole simulation dict instead of
            just the result.
        memoryLimitMb, threadCount: per-worker resources.
        blockUntilConnected: wait up to 2s for each worker to connect first.
        keepSimulationAlive: skip teardown; requires returnEverything.
        sharedStateViewFactory: shared-state factory; defaults to in-memory.

    Returns:
        The simulation dict when returnEverything, else waitForResult's value.
    """
    if keepSimulationAlive:
        assert returnEverything, \
            "can't keep the simulation alive and not return it. how would you shut it down?"

    callbackSchedulerToUse = CallbackScheduler.singletonForTesting()

    if sharedStateViewFactory is None:
        sharedStateViewFactory = createInMemorySharedStateViewFactory(
                                    callbackSchedulerToUse = callbackSchedulerToUse
                                    )

    workersVdmsAndEventHandlers, clientsAndVdms, viewFactory = (
        createWorkersAndClients(
            count,
            1,
            sharedStateViewFactory,
            memoryLimitMb = memoryLimitMb,
            threadCount = threadCount
            )
        )

    # Exactly one client is created above; use it to submit the computation.
    client = clientsAndVdms[0][0]
    clientVdm = clientsAndVdms[0][1]

    loadingServices = []

    # One PythonIoTaskService per worker, each bound to that machine's
    # external-dataset request channel.
    for ix in range(len(workersVdmsAndEventHandlers)):
        worker = workersVdmsAndEventHandlers[ix][0]
        workerVdm = workersVdmsAndEventHandlers[ix][1]

        s3InterfaceFactory = s3Service.withMachine(ix)
        # NOTE(review): when objectStore is None it is created once, bound to
        # machine 0's interface factory, and shared by all workers — confirm
        # this sharing is intentional.
        if objectStore is None:
            objectStore = S3ObjectStore.S3ObjectStore(
                s3InterfaceFactory,
                Setup.config().userDataS3Bucket,
                prefix="test/")

        loadingService = PythonIoTaskService.PythonIoTaskService(
            s3InterfaceFactory,
            objectStore,
            workerVdm,
            worker.getExternalDatasetRequestChannel(callbackSchedulerToUse).makeQueuelike(callbackSchedulerToUse)
            )
        loadingService.startService()

        loadingServices.append(loadingService)

    if blockUntilConnected:
        for worker,vdm,eventHandler in workersVdmsAndEventHandlers:
            blockUntilWorkerIsConnected(worker, 2.0)

    # Accept either a prebuilt definition or raw FORA source to wrap and call.
    if isinstance(expressionText, CumulusNative.ComputationDefinition):
        computationDefinition = expressionText
    else:
        computationDefinition = (
            createComputationDefinition(
                FORA.extractImplValContainer(
                    FORA.eval("fun() {" + expressionText + " } ")
                    ),
                ForaNative.makeSymbol("Call")
                )
            )

    # Collect teardown gates from every VDM so teardown can be awaited.
    # NOTE(review): these loop variables shadow the outer `client` (and bind
    # `worker`/`vdm`); harmless with one client, but confirm.
    teardownGates = []
    for client, vdm in clientsAndVdms:
        teardownGates.append(vdm.getVdmmTeardownGate())

    for worker, vdm, eventHandler in workersVdmsAndEventHandlers:
        teardownGates.append(vdm.getVdmmTeardownGate())

    simulationDict = {
        "result": None,
        "timedOut": None,
        "stats": None,
        "clientsAndVdms": clientsAndVdms,
        "workersVdmsAndEventHandlers": workersVdmsAndEventHandlers,
        "s3Service": s3Service,
        "loadingServices": loadingServices,
        "sharedStateViewFactory": sharedStateViewFactory,
        "client": client,
        "teardownGates": teardownGates
        }
    try:
        listener = client.createListener()

        computationSubmitTime = time.time()

        computationId = client.createComputation(computationDefinition)

        client.setComputationPriority(
            computationId,
            CumulusNative.ComputationPriority(1)
            )

        if returnEverything:
            valAndStatsOrNone = waitForResult(listener, computationId, clientVdm, timeout=timeout, wantsStats=True)

            computationReturnTime = time.time()

            if valAndStatsOrNone is None:
                #we timed out
                val = None
                stats = None
                timedOut = True
            else:
                val, stats = valAndStatsOrNone
                timedOut = False

            simulationDict.update({
                "result": val,
                "stats": stats,
                "timedOut": timedOut,
                "computationId": computationId,
                "listener": listener,
                "totalTimeToReturnResult": computationReturnTime - computationSubmitTime
                })

            return simulationDict
        else:
            return waitForResult(listener, computationId, clientVdm, timeout=timeout, wantsStats=wantsStats)
    finally:
        # Tear the simulation down even on failure, unless the caller asked
        # to keep it alive (in which case they own shutdown).
        if not keepSimulationAlive:
            teardownSimulation(simulationDict)
    def __init__(self,
                workerCount,
                clientCount,
                memoryPerWorkerMB,
                threadsPerWorker,
                s3Service,
                objectStore=None,
                callbackScheduler=None,
                sharedStateViewFactory=None,
                ioTaskThreadOverride=None,
                useInMemoryCache=True,
                channelThroughputMBPerSecond=None,
                pageSizeOverride=None,
                disableEventHandler=False,
                machineIdHashSeed=None
                ):
        """Build an in-memory Cumulus simulation with `workerCount` workers
        and `clientCount` clients.

        Args:
            workerCount, clientCount: initial cluster size; members are added
                via addWorker()/addClient().
            memoryPerWorkerMB, threadsPerWorker: per-worker resources.
            s3Service: S3 interface factory for IO-task services.
            objectStore: object store; defaults to an S3ObjectStore over an
                in-memory or configured bucket (see below).
            callbackScheduler: scheduler; defaults to the testing singleton.
            sharedStateViewFactory: defaults to an in-memory factory.
            ioTaskThreadOverride: thread-count override for IO task services.
            useInMemoryCache: if False, back workers with on-disk caches under
                CUMULUS_DATA_DIR (or a temp dir).
            channelThroughputMBPerSecond: optional channel rate limit.
            pageSizeOverride: explicit VDM page size in bytes.
            disableEventHandler: drop worker events instead of holding them.
            machineIdHashSeed: seed for machine-id generation.
        """
        self.useInMemoryCache = useInMemoryCache
        self.machineIdHashSeed = machineIdHashSeed

        # Disk-cache mode: put cache storage under CUMULUS_DATA_DIR when set,
        # otherwise in a fresh temp directory.
        if not self.useInMemoryCache:
            self.diskCacheCount = 0
            if os.getenv("CUMULUS_DATA_DIR") is None:
                self.diskCacheStorageDir = tempfile.mkdtemp()
            else:
                self.diskCacheStorageDir = os.path.join(
                    os.getenv("CUMULUS_DATA_DIR"),
                    str(uuid.uuid4())
                    )
        self.ioTaskThreadOverride = ioTaskThreadOverride
        self.workerCount = 0
        self.disableEventHandler = disableEventHandler
        self.clientCount = 0
        self.memoryPerWorkerMB = memoryPerWorkerMB
        self.threadsPerWorker = threadsPerWorker
        self.s3Service = s3Service
        self.objectStore = objectStore
        if self.objectStore is None:
            s3 = s3Service()
            if isinstance(s3, InMemoryS3Interface.InMemoryS3Interface):
                # Touch the bucket so it exists in the in-memory S3 fake.
                objectStoreBucket = "object_store_bucket"
                s3.setKeyValue(objectStoreBucket, 'dummyKey', 'dummyValue')
                s3.deleteKey(objectStoreBucket, 'dummyKey')
            else:
                objectStoreBucket = Setup.config().userDataS3Bucket
            self.objectStore = S3ObjectStore.S3ObjectStore(
                s3Service,
                objectStoreBucket,
                prefix="test/")
        self.callbackScheduler = callbackScheduler or CallbackScheduler.singletonForTesting()
        self.sharedStateViewFactory = (
            sharedStateViewFactory or createInMemorySharedStateViewFactory(self.callbackScheduler)
            )
        self.channelThroughputMBPerSecond = channelThroughputMBPerSecond
        # VDM used for extracting results on the simulation side (5 MB pages).
        self.resultVDM = ForaNative.VectorDataManager(self.callbackScheduler, 5 * 1024 * 1024)
        self.pageSizeOverride = pageSizeOverride

        self.rateLimitedChannelGroupsForEachListener = []
        self.workersVdmsAndEventHandlers = []
        self.machineIds = []
        self.machineIdsEverAllocated = 0
        self.clientsAndVdms = []
        self.loadingServices = []
        self.clientTeardownGates = []
        self.workerTeardownGates = []


        for ix in range(workerCount):
            self.addWorker()
        for ix in range(clientCount):
            self.addClient()

        # A listener on the first client, when any client exists.
        if clientCount:
            self.listener = self.getClient(0).createListener()
        else:
            self.listener = None
 def setUp(self):
     """Bind runtime objects and a 10 MB-budget VectorDataManager for the tests."""
     self.runtime = Runtime.getMainRuntime()
     self.axioms = self.runtime.getAxioms()
     self.native_runtime = self.runtime.getTypedForaCompiler()
     self.vdm = FORANative.VectorDataManager(CallbackScheduler.singletonForTesting(), 10000000)
 def setUp(self):
     """Bind the shared runtime, axioms, compiler, and builtin-module judgment."""
     self.callbackScheduler = CallbackScheduler.singletonForTesting()
     self.runtime = Runtime.getMainRuntime()
     self.axioms = self.runtime.getAxioms()
     self.compiler = self.runtime.getTypedForaCompiler()
     self.builtinsAsJOV = FORANative.JudgmentOnValue.Constant(FORA.builtin().implVal_)
Exemple #28
0
 def setUp(self):
     """Bind runtime objects and a 10 MB-budget VectorDataManager for the tests."""
     self.runtime = Runtime.getMainRuntime()
     self.axioms = self.runtime.getAxioms()
     self.native_runtime = self.runtime.getTypedForaCompiler()
     self.vdm = FORANative.VectorDataManager(
         CallbackScheduler.singletonForTesting(), 10000000)
 def setUp(self):
     """Create the test callback scheduler used by this suite."""
     self.callbackScheduler = CallbackScheduler.singletonForTesting()
Exemple #30
0
def computeUsingSeveralWorkers(expressionText,
                               s3Service,
                               count,
                               objectStore=None,
                               wantsStats=False,
                               timeout=10,
                               returnEverything=False,
                               memoryLimitMb=100,
                               blockUntilConnected=False,
                               keepSimulationAlive=False,
                               sharedStateViewFactory=None,
                               threadCount=2):
    """Evaluate a FORA expression on a freshly constructed in-process simulation.

    Spins up `count` Cumulus workers plus a single client, submits
    `expressionText`, and waits up to `timeout` seconds for a result.

    Args:
        expressionText: FORA source text (wrapped in `fun() { ... }` and
            called), or an already-built CumulusNative.ComputationDefinition
            which is submitted directly.
        s3Service: factory providing a per-machine S3 interface for each
            worker's dataset-loading service (`withMachine(ix)`).
        count: number of workers to create.
        objectStore: optional object store; when None, an S3ObjectStore is
            built lazily from the first worker's S3 interface and then
            shared by every loading service.
        wantsStats: when True (and returnEverything is False), waitForResult
            is asked for a (value, stats) pair instead of just the value.
        timeout: seconds to wait for the computation to produce a result.
        returnEverything: when True, return a dict describing the whole
            simulation (result, stats, timing, live handles) instead of
            only the result.
        memoryLimitMb: per-worker VDM memory limit, in megabytes.
        blockUntilConnected: when True, wait (up to 2s per worker) for
            every worker to report itself connected before submitting.
        keepSimulationAlive: when True, skip teardown on exit; requires
            returnEverything so the caller can shut the simulation down.
        sharedStateViewFactory: optional shared-state view factory; an
            in-memory one is created when omitted.
        threadCount: compute threads per worker.

    Returns:
        waitForResult's value (or (value, stats) when wantsStats), or the
        full simulation dict when returnEverything is True.
    """
    if keepSimulationAlive:
        assert returnEverything, \
            "can't keep the simulation alive and not return it. how would you shut it down?"

    callbackSchedulerToUse = CallbackScheduler.singletonForTesting()

    if sharedStateViewFactory is None:
        sharedStateViewFactory = createInMemorySharedStateViewFactory(
            callbackSchedulerToUse=callbackSchedulerToUse)

    workersVdmsAndEventHandlers, clientsAndVdms, viewFactory = (
        createWorkersAndClients(count,
                                1,
                                sharedStateViewFactory,
                                memoryLimitMb=memoryLimitMb,
                                threadCount=threadCount))

    # Exactly one client is created above; keep direct references to it and
    # its VDM for submission and result extraction below.
    client = clientsAndVdms[0][0]
    clientVdm = clientsAndVdms[0][1]

    loadingServices = []

    for ix, (worker, workerVdm, _) in enumerate(workersVdmsAndEventHandlers):
        s3InterfaceFactory = s3Service.withMachine(ix)

        # Build the object store once (from the first worker's interface)
        # and share it across all loading services.
        if objectStore is None:
            objectStore = S3ObjectStore.S3ObjectStore(
                s3InterfaceFactory,
                Setup.config().userDataS3Bucket,
                prefix="test/")

        loadingService = PythonIoTaskService.PythonIoTaskService(
            s3InterfaceFactory, objectStore, workerVdm,
            worker.getExternalDatasetRequestChannel(
                callbackSchedulerToUse).makeQueuelike(callbackSchedulerToUse))
        loadingService.startService()

        loadingServices.append(loadingService)

    if blockUntilConnected:
        for connectingWorker, _, _ in workersVdmsAndEventHandlers:
            blockUntilWorkerIsConnected(connectingWorker, 2.0)

    if isinstance(expressionText, CumulusNative.ComputationDefinition):
        computationDefinition = expressionText
    else:
        computationDefinition = (createComputationDefinition(
            FORA.extractImplValContainer(
                FORA.eval("fun() {" + expressionText + " } ")),
            ForaNative.makeSymbol("Call")))

    # Collect every VDM teardown gate (client VDMs first, then worker VDMs).
    # NOTE(review): the previous version iterated `for client, vdm in
    # clientsAndVdms`, rebinding `client` — harmless only because a single
    # client exists. Distinct loop names keep `client` pointing at
    # clientsAndVdms[0][0].
    teardownGates = [gateVdm.getVdmmTeardownGate()
                     for _, gateVdm in clientsAndVdms]
    teardownGates.extend(gateVdm.getVdmmTeardownGate()
                         for _, gateVdm, _ in workersVdmsAndEventHandlers)

    simulationDict = {
        "result": None,
        "timedOut": None,
        "stats": None,
        "clientsAndVdms": clientsAndVdms,
        "workersVdmsAndEventHandlers": workersVdmsAndEventHandlers,
        "s3Service": s3Service,
        "loadingServices": loadingServices,
        "sharedStateViewFactory": sharedStateViewFactory,
        "client": client,
        "teardownGates": teardownGates
    }
    try:
        listener = client.createListener()

        computationSubmitTime = time.time()

        computationId = client.createComputation(computationDefinition)

        client.setComputationPriority(computationId,
                                      CumulusNative.ComputationPriority(1))

        if returnEverything:
            valAndStatsOrNone = waitForResult(listener,
                                              computationId,
                                              clientVdm,
                                              timeout=timeout,
                                              wantsStats=True)

            computationReturnTime = time.time()

            if valAndStatsOrNone is None:
                # waitForResult returning None means we timed out.
                val = None
                stats = None
                timedOut = True
            else:
                val, stats = valAndStatsOrNone
                timedOut = False

            simulationDict.update({
                "result": val,
                "stats": stats,
                "timedOut": timedOut,
                "computationId": computationId,
                "listener": listener,
                "totalTimeToReturnResult":
                    computationReturnTime - computationSubmitTime
            })

            return simulationDict
        else:
            return waitForResult(listener,
                                 computationId,
                                 clientVdm,
                                 timeout=timeout,
                                 wantsStats=wantsStats)
    finally:
        # Tear down even when submission or waiting raised, unless the
        # caller explicitly asked to keep the simulation alive.
        if not keepSimulationAlive:
            teardownSimulation(simulationDict)
Example #31
0
 def setUpClass(cls):
     # Create a class-wide VectorDataManager (10,000,000-byte page size
     # argument) backed by the shared test callback scheduler, so every
     # test in this case reuses the same VDM.
     cls.vdm = ForaNative.VectorDataManager(
         CallbackScheduler.singletonForTesting(), 10000000)