Example #1
    scenario.scenID = stamp

    # save the scenario model instance under its timestamp ID
    with open(os.path.join(tempDir,str(stamp)+'.pckl'), 'wb') as f:
        pickle.dump(scenario, f)

    return scenario.scenID

if __name__ == "__main__":

    # set up path
    rootPath = pathToRepository
    script = argv[0]
    _scenType = argv[1]
    _numFollowers = int(argv[2])
    _numAggregators = int(argv[3])
    _aggPerFollower = int(argv[4])
    _policy = argv[5]
    _numFacts = int(argv[6])
    _valRange = int(argv[7])
    _ruleScenario = argv[8]
    _numPeers = int(argv[9])
    scenarios = driver.simpleMAF(_scenType, _numFollowers, _numAggregators, _aggPerFollower, _policy, _numFacts, _valRange, _ruleScenario, _numPeers)  # create a list of scenario instances
    for s in scenarios:
        generateScenarioFiles( s, rootPath )    # generate each scenario
    #sys.path.append(os.path.join(rootPath,'/webdamlog-exp'))
    
    driver.localSVNCommit( commitPath )   # commit the results
    
    exit()
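
Since the scenario ID doubles as the pickle filename, a saved scenario can be read back later. A minimal sketch of the reverse operation, assuming the same tempDir used above (loadScenario is a hypothetical helper, not part of the original code):

import os
import pickle

def loadScenario(tempDir, scenID):
    # the scenario ID is the millisecond timestamp used as the filename
    with open(os.path.join(tempDir, str(scenID) + '.pckl'), 'rb') as f:
        return pickle.load(f)
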
Example #2
        pickle.dump(scenario, f)

    return scenario.scenID


if __name__ == "__main__":

    # set up path
    rootPath = pathToRepository
    script = argv[0]
    _scenType = argv[1]
    _numFollowers = int(argv[2])
    _numAggregators = int(argv[3])
    _aggPerFollower = int(argv[4])
    _policy = argv[5]
    _numFacts = int(argv[6])
    _valRange = int(argv[7])
    _ruleScenario = argv[8]
    _numPeers = int(argv[9])
    scenarios = driver.simpleMAF(
        _scenType, _numFollowers, _numAggregators, _aggPerFollower, _policy,
        _numFacts, _valRange, _ruleScenario,
        _numPeers)  # create a list of scenario instances
    for s in scenarios:
        generateScenarioFiles(s, rootPath)  # generate each scenario
    #sys.path.append(os.path.join(rootPath,'/webdamlog-exp'))

    driver.localSVNCommit(commitPath)  # commit the results

    exit()
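
Both variants above read nine positional arguments from argv. A minimal, hypothetical guard that fails fast on a malformed command line (the usage string simply mirrors the parsing order; it is not part of the original script):

import sys

if len(sys.argv) != 10:    # script name + nine parameters
    sys.exit('usage: %s scenType numFollowers numAggregators aggPerFollower '
             'policy numFacts valRange ruleScenario numPeers' % sys.argv[0])
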
Example #3
def executeScenario(pathToRepository, scenID, scenType, mode, timeToRun,
                    masterDelay):

    stamp = int(time.time() * 1000)

    # construct execution record, to be pickled
    execution = models.Execution(
        execID = stamp,
        scenID = scenID,
        timeToRun = timeToRun,
        mode = mode,
        build = build )          # numTicks is now ignored; remove

    # this is the directory containing the scenario: e.g. webdamlog-exp/MAF/1385388824301
    scenPath = os.path.join('webdamlog-exp', scenType, str(scenID))

    # make sure scenario path exists locally
    localScenPath = os.path.join(pathToRepository, scenPath)
    assert os.path.exists(localScenPath)

    # this is the path to dir where output will be written (by remote peers later)
    # e.g. webdamlog-exp/MAF/1385388824301/exec_<timestamp>
    execPath = os.path.join(scenPath, 'exec_' + str(stamp))
    localExecPath = os.path.join(pathToRepository, execPath)

    # create directory for execution within scenario dir, svn add and commit
    os.makedirs(localExecPath)
    with open(os.path.join(localExecPath, str(stamp) + '.pckl'), 'wb') as f:
        pickle.dump(execution, f)
    # also create this directory to avoid svn conflict at peers
    os.makedirs(os.path.join(localExecPath, 'bench_files'))
    driver.localSVNCommit(localScenPath)

    print localScenPath

    # inspect scenario for 'out_' directories, infer hosts
    outs = []
    for output in glob.glob(os.path.join(localScenPath, 'out_*')):
        outs.append(output)
    print outs
    # the common key is the third '_'-separated field of the out_* directory names
    outKey = os.path.split(outs[0])[1].split('_')[2]
    print outKey
    hosts = []
    masterHost = None
    for out in outs:
        extractedHostName = os.path.split(out)[1].split('_')[1]
        if (len(glob.glob(os.path.join(out, 'run_master*')))) == 1:
            masterHost = extractedHostName
        if (len(glob.glob(os.path.join(out, 'run_sue*')))) == 1:
            masterHost = extractedHostName
        hosts.append(extractedHostName)
    assert masterHost is not None

    execution.success = True
    start = time.time()

    env.hosts = hosts
    env.parallel = True  # execute on each host in parallel
    try:
        # each host should pull latest code and latest exp
        execute(fab.pull_svn, scenPath=scenPath)
    except:
        print >> sys.stderr, 'Pull failed: ', sys.exc_info()[0]
        execution.success = False

    #only try to run ruby if the pull of files worked
    if execution.success:
        # prepare parameters for ruby script
        paramString = ''
        print "the mode which is set is", mode
        accessBool = mode & 1
        optim1Bool = mode & 2
        optim2Bool = mode & 4
        if (accessBool):
            paramString += 'access' + ' '
        if (optim1Bool):
            paramString += 'optim1' + ' '
        if (optim2Bool):
            paramString += 'optim2' + ' '

        # run on all hosts
        try:
            execute(fab.run_ruby,
                    execPath=execPath,
                    scenPath=scenPath,
                    paramString=paramString,
                    outKey=str(outKey))
        except CommandTimeout:
            execution.success = False
            #don't want to check the files in, this run is invalid
            return
        except:
            print >> sys.stderr, 'Execution failed: ', sys.exc_info()[0]
            execution.success = False

        try:
            execute(fab.run_commit, execPath=execPath)
        except:
            print >> sys.stderr, 'Failed to commit files, please add them manually: ', sys.exc_info()[0]
            #this does not change the execution success

    execution.runTime = time.time() - start

    # pickle object
    with open(os.path.join(localExecPath, str(stamp) + '.pckl'), 'wb') as f:
        pickle.dump(execution, f)


# refresh database for this execution
#    execute(fab.pull_svn)      # make sure files generated at all hosts are in
#    loadBenchmark.processExecs( scenID, localExecPath)

    driver.localSVNCommit(localScenPath)

    return execution.execID
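
The mode argument packs three switches into one integer: bit 0 turns on access control, bit 1 the first optimization, bit 2 the second. A minimal standalone sketch of that decoding (modeToParamString is a hypothetical helper mirroring the branch above):

def modeToParamString(mode):
    # bit 0 -> 'access', bit 1 -> 'optim1', bit 2 -> 'optim2'
    flags = []
    if mode & 1:
        flags.append('access')
    if mode & 2:
        flags.append('optim1')
    if mode & 4:
        flags.append('optim2')
    return ' '.join(flags)

print modeToParamString(5)    # mode 5 = 0b101 -> 'access optim2'
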
Example #4
def executeScenario( pathToRepository, scenID, scenType, mode, timeToRun, masterDelay ):

    stamp = int(time.time()*1000)
    
    # construct execution record, to be pickled
    execution = models.Execution(
        execID = stamp,
        scenID = scenID,
        timeToRun = timeToRun,
        mode = mode,
        build = build )          # numTicks is now ignored; remove
    
    # this is the directory containing the scenario: e.g. webdamlog-exp/MAF/1385388824301
    scenPath = os.path.join('webdamlog-exp',scenType,str(scenID))
    
    # make sure scenario path exists locally
    localScenPath = os.path.join(pathToRepository,scenPath)
    assert os.path.exists(localScenPath)
    
    # this is the path to dir where output will be written (by remote peers later)
    # e.g. webdamlog-exp/MAF/1385388824301/exec_<timestamp>
    execPath = os.path.join(scenPath,'exec_'+str(stamp))
    localExecPath = os.path.join(pathToRepository,execPath)

    # create directory for execution within scenario dir, svn add and commit
    os.makedirs(localExecPath)
    with open(os.path.join(localExecPath,str(stamp)+'.pckl'), 'wb') as f:
        pickle.dump(execution, f)
    os.makedirs(os.path.join(localExecPath,'bench_files'))  # also create this directory to avoid svn conflict at peers
    driver.localSVNCommit(localScenPath)

    print localScenPath

    # inspect scenario for 'out_' directories, infer hosts
    outs = []
    for output in glob.glob(os.path.join(localScenPath,'out_*')):
        outs.append(output)
    print outs
    outKey = os.path.split(outs[0])[1].split('_')[2]  # this gets common key from name of out* directories
    print outKey
    hosts = []
    masterHost = None
    for out in outs:
        extractedHostName = os.path.split(out)[1].split('_')[1]
        if (len(glob.glob(os.path.join(out,'run_master*')))) == 1:
            masterHost = extractedHostName
        if (len(glob.glob(os.path.join(out,'run_sue*')))) == 1:
            masterHost = extractedHostName
        hosts.append(extractedHostName)
    assert masterHost is not None

    execution.success = True
    start = time.time()

    env.hosts = hosts
    env.parallel = True     # execute on each host in parallel
    try:
        execute(fab.pull_svn, scenPath=scenPath)         # each host should pull latest code and latest exp
    except:
        print >> sys.stderr, 'Pull failed: ', sys.exc_info()[0]
        execution.success = False

    #only try to run ruby if the pull of files worked
    if execution.success:
        # prepare parameters for ruby script
        paramString = ''
        print "the mode which is set is",  mode
        accessBool = mode & 1
        optim1Bool = mode & 2
        optim2Bool = mode & 4
        if (accessBool):
            paramString += 'access'+' '
        if (optim1Bool):
            paramString += 'optim1'+' '
        if (optim2Bool):
            paramString += 'optim2'+' '
            
        # run on all hosts
        try:
            execute(fab.run_ruby, execPath=execPath, scenPath=scenPath, paramString=paramString, outKey=str(outKey))
        except CommandTimeout:
            execution.success = False
            #don't want to check the files in, this run is invalid
            return
        except:
            print >> sys.stderr, 'Execution failed: ', sys.exc_info()[0]
            execution.success = False

        try:
            execute(fab.run_commit, execPath=execPath)
        except:
            print >> sys.stderr, 'Failed to commit files, please add them manually: ', sys.exc_info()[0]
            #this does not change the execution success
    
    execution.runTime = time.time() - start

    # pickle object
    with open(os.path.join(localExecPath,str(stamp)+'.pckl'), 'wb') as f:
        pickle.dump(execution, f)

# refresh database for this execution
#    execute(fab.pull_svn)      # make sure files generated at all hosts are in
#    loadBenchmark.processExecs( scenID, localExecPath)

    driver.localSVNCommit(localScenPath)
    
    return execution.execID
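
Host discovery above depends on the out_<host>_<key> naming convention of the scenario's output directories. A minimal sketch of that parsing (parseOutDir and the sample path are hypothetical):

import os

def parseOutDir(path):
    # 'out_<host>_<key>' -> (host, key)
    parts = os.path.split(path)[1].split('_')
    return parts[1], parts[2]

print parseOutDir('webdamlog-exp/MAF/1385388824301/out_host1_42')    # ('host1', '42')
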
Example #5
def run(configFile):

    config = ConfigParser.ConfigParser()
    config.read(configFile)

    rootPath = os.path.join(os.environ['HOME'], config.get('environment', 'rootPath'))
    scenType = config.get('default', 'scenarioType')
    scenarioList = []
    
    if scenType == 'MAF':

        # set-valued parameters (space delimited in config file)
        policyList = config.get('scenarioMAF', 'policy').split(' ')
        ruleScenarioList = config.get('scenarioMAF', 'ruleScenario').split(' ')
        numFollowersList = config.get('scenarioMAF', 'numFollowers').split(' ')
        numAggregatorsList = config.get('scenarioMAF', 'numAggregators').split(' ')
        numAgPerFollowerList = config.get('scenarioMAF', 'aggPerFollower').split(' ')
        numFactsList = config.get('scenarioMAF', 'numFacts').split(' ')
        
        # this forms the crossproduct of all set-valued parameters
        for tup in itertools.product(policyList, ruleScenarioList, numFollowersList, numAggregatorsList, numAgPerFollowerList, numFactsList):       
            print tup
            scenario = models.Scenario(
                # scenID is filled in later
                scenType = 'MAF',
                numFollowers = int(tup[2]),
                numAggregators = int(tup[3]),
                aggPerFollower = int(tup[4]),
                policy = tup[0],
                numFacts = int(tup[5]),
                ruleScenario = tup[1],
                #valRange = config.getint('scenarioMAF', 'valRange'),
                valRange = int(tup[5]),
                numExtraCols = config.getint('scenarioMAF', 'numExtraCols'),
                numHosts = config.getint('scenarioMAF', 'numHosts'),
                hosts = config.get('scenarioMAF', 'hosts').split(' '),
                numPeersPerHost = config.getint('scenarioMAF', 'numPeersPerHost') )
            scenarioList.append(scenario)


    if scenType == 'PA':
        
        policyList = config.get('scenarioPA', 'policy').split(' ')
        networkFileList = config.get('scenarioPA', 'networkFile').split(' ')
        numFactsList = config.get('scenarioPA', 'numFacts').split(' ')
        
        for tup in itertools.product(policyList, networkFileList, numFactsList):
            print tup
            # follower count is inferred from the networkFile name
            numf = int(tup[1].split('-')[1][1:])-3
            numh = config.getint('scenarioPA', 'numHosts')
            scenario = models.Scenario(
                scenType = 'PA',
                numFollowers = numf,
                numAggregators = 0,
                aggPerFollower = 0,
                policy = tup[0],
                numFacts = int(tup[2]),
                ruleScenario = 'PA',
                valRange = int(tup[2]),
                numExtraCols = 0,
                numHosts = numh,
                hosts = config.get('scenarioPA', 'hosts').split(' '),
                numPeersPerHost = numf/(numh-1)+1,
                networkFile = tup[1] )
            scenarioList.append(scenario)


    print '***  Checking / creating %i scenarios...' % len(scenarioList)
    # get scenario IDs after matching or creating scenarios
    scenIDList = matchOrCreateScenario(scenarioList, rootPath)

    driver.localSVNCommit( os.path.join(rootPath, 'webdamlog-exp') )

    # start on executions...
    # set-valued execution parameters (space delimited in config file)
    accessCList = config.get('execution', 'accessControl').split(' ')
    
    for scenID in scenIDList:   # run all the executions, for each scenID
        for run in range( config.getint('execution', 'numRuns') ):
            print 'Running executions for scenID %i' % scenID
            for bits in accessCList:    # each entry is a binary string, e.g. '101'
                print 'access-control bit string:', bits
                mode = int(bits, 2)
                print 'mode is', mode
                execID = execution.executeScenario( rootPath, scenID, scenType, mode,
                                 config.getfloat('execution', 'timeToRun'),
                                 config.getfloat('execution', 'masterDelay') )
                print '***  Finished run %i of execution %i.' % (run, execID)
                sleep(30)
    print '***  Done with executions.'
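
Two details above are easy to miss: itertools.product enumerates every combination of the space-delimited config lists, and each accessControl entry is a binary string whose bits select the execution mode. A minimal sketch with assumed values:

import itertools

policyList = ['public', 'private']    # assumed config values
numFactsList = ['100', '1000']
for policy, numFacts in itertools.product(policyList, numFactsList):
    print policy, numFacts            # four combinations in total

print int('101', 2)    # binary '101' -> mode 5 (access + optim2)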