Example #1
import time

import probabilities  # project-local module used below
import utils          # project-local module used below


def runApp(configHandler, mongoHandler, traceImporter, Params):
    sc = None
    if Params.isParallelExecution:
        sc = utils.createSparkContext(Params)
        print ("NUMBER OF TASKS USED = " + str(Params.numTasks) + \
                " PlEASE provide correct number should be a factor of number of workers (e.g. 5 * NUM WORKERS). PySpark API doesn't know number of workers :(")

    # TODO CPaduraru: hack; see the other comments in this file about the Spark serialization-of-process issue
    recreateTracerProcessAtEachNewFolder = True  # isParallelExecution

    # One tracer process is opened for each task. These are worker processes that sleep most of the time, so they shouldn't affect performance
    tracerProcesses = []
    if not recreateTracerProcessAtEachNewFolder:
        createSimpleTracerProcesses(tracerProcesses, Params)
    #---

    t0 = time.time()

    testIndex = 0
    previousPopulation = None
    while True:  # NOTE: no exit condition in this snippet, so the cleanup below is never reached as written
        print("INFO: Starting generation [%d]" % testIndex)
        # noEdgeProbability is the value used in fitness evaluation for edges not yet encountered in the probabilities map
        d = utils.getFolderPathFromId(testIndex)  # folder for this generation (not used further in this snippet)

        if previousPopulation and Params.driver:
            individuals = previousPopulation.getpopulation()
            probabilities.updateFromDB(individuals, traceImporter,
                                       Params.entryTemplate)

        workerRes = execute(sc, traceImporter, Params, tracerProcesses)
        previousPopulation = workerRes.population

        # TODO CPaduraru: again a horrible hack, because a Python closure can capture subprocess data.
        # After solve() (the map part) the tracer process is set to None, so we must recreate it to generate the new tests folder
        if recreateTracerProcessAtEachNewFolder:
            cmd = utils.createTracerCmd(Params)
            workerRes.evalFunctor.tracerProcess = utils.createTracerProcess(
                cmd, int(Params.inputLength))
        #-----------

        # TODO CPaduraru: hack associated with the one above
        if recreateTracerProcessAtEachNewFolder:
            utils.stopTracerProcess(workerRes.evalFunctor.tracerProcess)
        #------
        testIndex += 1

    ## Disconnect from external Driver components
    traceImporter.clear()

    # Now stop all tracer processes
    if not recreateTracerProcessAtEachNewFolder:
        stopSimpleTracerProcesses(tracerProcesses)

    dt = time.time() - t0
    print("Time to solve : %fs" % dt)
Example #2
def updateTracerProcess(self):
    # Re-attach a live tracer process after the functor was shipped through
    # Spark (pickling drops the unpicklable subprocess handle).
    if self.isRunningInParallel:
        self.evalFunctor.tracerProcess = utils.createTracerProcess(
            self.tracerProcessCmd, self.tracerProcessPayloadSize)
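
Example #2 is presumably the worker-side counterpart: after unpickling, the functor's tracer handle is None, and this method re-attaches a live one. A runnable sketch of a hypothetical host class (the attribute names come from the example; everything else is assumed):

import subprocess

class TracerWorker:
    def __init__(self, cmd, payloadSize, parallel):
        self.tracerProcessCmd = cmd
        self.tracerProcessPayloadSize = payloadSize  # kept only for illustration
        self.isRunningInParallel = parallel
        self.evalFunctor = type("Functor", (), {"tracerProcess": None})()

    def updateTracerProcess(self):
        if self.isRunningInParallel:
            # Stands in for utils.createTracerProcess, which is not shown here.
            self.evalFunctor.tracerProcess = subprocess.Popen(self.tracerProcessCmd)

worker = TracerWorker(["sleep", "5"], payloadSize=64, parallel=True)
worker.updateTracerProcess()  # re-attach a live tracer after deserialization
worker.evalFunctor.tracerProcess.terminate()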
Example #3
def createSimpleTracerProcesses(outListOfProcesses, Params):
    # Spawn one long-lived tracer process per task and collect the handles.
    cmd = utils.createTracerCmd(Params)
    for _ in range(Params.numTasks):
        p = utils.createTracerProcess(cmd, int(Params.inputLength))
        outListOfProcesses.append(p)
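
None of the examples show the utils helpers themselves. A plausible minimal sketch of createTracerProcess and stopTracerProcess built on subprocess follows; this is an assumption about their shape, not the project's actual code, and it assumes cmd is an argument list:

import subprocess

def createTracerProcess(cmd, inputLength):
    # Launch the tracer with pipes so test payloads (up to inputLength bytes,
    # judging by the callers above) can be streamed to its stdin.
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    p.payloadSize = inputLength  # remembered for the writer side (illustrative)
    return p

def stopTracerProcess(process):
    # Close stdin first so the tracer can exit cleanly, then terminate it.
    if process.stdin:
        process.stdin.close()
    process.terminate()
    process.wait()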