Example #1
        dateTimeTag = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
        outDataDir = 'data/k6_' + dateTimeTag

    #sys.exit( 'DEBUGGING' )
    #if not os.path.isfile( workerDirPath + '/k6.tar.gz' ):
    #    logger.error( 'the compressed k6 binary was not found, you may need to build and compress it' )
    #    sys.exit( 1)
    try:
        rc = batchRunner.runBatch(frameProcessor=k6FrameProcessor(),
                                  commonInFilePath=workerDirPath,
                                  authToken=os.getenv('NCS_AUTH_TOKEN')
                                  or 'YourAuthTokenHere',
                                  cookie=args.cookie,
                                  encryptFiles=False,
                                  timeLimit=frameTimeLimit + 40 * 60,
                                  instTimeLimit=7 * 60,
                                  frameTimeLimit=frameTimeLimit,
                                  filter=args.filter,
                                  outDataDir=outDataDir,
                                  startFrame=1,
                                  endFrame=nFrames,
                                  nWorkers=nWorkers,
                                  limitOneFramePerWorker=True,
                                  autoscaleMax=1)
        if os.path.isfile(outDataDir + '/recruitLaunched.json'):
            rc2 = subprocess.call([
                sys.executable,
                scriptDirPath() + '/plotK6Output.py', '--dataDirPath',
                outDataDir
            ],
                                  stdout=subprocess.DEVNULL)
            if rc2:
Example #2
        cmd = './%s %d > %s' % \
            (workerBinFileName, frameNum, self.frameOutFileName(frameNum))
        return cmd


if __name__ == "__main__":
    # configure logger formatting
    #logging.basicConfig()
    logger = logging.getLogger(__name__)
    logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
    logDateFmt = '%Y/%m/%d %H:%M:%S'
    formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt)
    logging.basicConfig(format=logFmt, datefmt=logDateFmt)

    dateTimeTag = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
    outDataDirPath = 'data/binary_' + dateTimeTag

    rc = batchRunner.runBatch(
        frameProcessor=binaryFrameProcessor(),
        commonInFilePath=binaryFrameProcessor.workerBinFilePath,
        filter='{"cpu-arch": "aarch64", "dpr": ">=24"}',
        authToken=os.getenv('NCS_AUTH_TOKEN') or 'YourAuthTokenHere',
        timeLimit=480,
        instTimeLimit=120,
        frameTimeLimit=120,
        outDataDir=outDataDirPath,
        encryptFiles=False,
        startFrame=1,
        endFrame=6)
    sys.exit(rc)
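A minimal sketch of the frame-processor subclass that Example #2 assumes, inferred from the command-building fragment above and the usual batchRunner.frameProcessor pattern (the base class, method names, and the workerBinFilePath value are assumptions, not the original source):

import os

import batchRunner


class binaryFrameProcessor(batchRunner.frameProcessor):
    '''runs a precompiled worker binary once per frame (illustrative sketch)'''
    workerBinFilePath = 'helloFrame_aarch64'  # hypothetical path to the binary pushed to each worker

    def frameOutFileName(self, frameNum):
        # one output file per frame
        return 'frame_%d.out' % frameNum

    def frameCmd(self, frameNum):
        # run the uploaded binary with the frame number, redirecting its stdout to the frame output file
        workerBinFileName = os.path.basename(self.workerBinFilePath)
        cmd = './%s %d > %s' % \
            (workerBinFileName, frameNum, self.frameOutFileName(frameNum))
        return cmd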
Example #3
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt)
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
#batchRunner.logger.setLevel(logging.DEBUG)  # for more verbosity

dateTimeTag = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
outDataDir = 'data/puppeteer_' + dateTimeTag

try:
    rc = batchRunner.runBatch(
        frameProcessor=PuppeteerLighthouseFrameProcessor(),
        commonInFilePath=PuppeteerLighthouseFrameProcessor.PuppeteerFilePath,
        authToken=os.getenv('NCS_AUTH_TOKEN') or 'YourAuthTokenHere',
        encryptFiles=False,
        timeLimit=80 * 60,
        instTimeLimit=24 * 60,
        frameTimeLimit=600,
        filter=
        '{"dpr": ">=48","ram:":">=2800000000","app-version": ">=2.1.11"}',
        outDataDir=outDataDir,
        startFrame=1,
        endFrame=5,
        nWorkers=10,
        limitOneFramePerWorker=True,
        autoscaleMax=2)
    if os.path.isfile(outDataDir + '/recruitLaunched.json'):
        untarResults(outDataDir)
        rc2 = subprocess.call(
            ['./processPuppeteerOutput.py', '--dataDirPath', outDataDir],
            stdout=subprocess.DEVNULL)
        if rc2:
            logger.warning('processPuppeteerOutput exited with returnCode %d',
                           rc2)
Example #4
        return cmd
 
if __name__ == "__main__":
    # configure logger
    logger = logging.getLogger(__name__)
    logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
    logDateFmt = '%Y/%m/%d %H:%M:%S'
    formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )
    logging.basicConfig(format=logFmt, datefmt=logDateFmt)

    dateTimeTag = datetime.datetime.now().strftime( '%Y-%m-%d_%H%M%S' )
    outDataDirPath = 'data/python_' + dateTimeTag

 
    rc = batchRunner.runBatch(
        frameProcessor = pythonFrameProcessor(),
        commonInFilePath = pythonFrameProcessor.workerScriptPath,
        authToken = os.getenv( 'NCS_AUTH_TOKEN' ) or 'YourAuthTokenHere',
        timeLimit = 1200,
        instTimeLimit = 450,
        frameTimeLimit = 300,
        filter = '{"dpr": ">=24"}',
        outDataDir = outDataDirPath,
        encryptFiles = False,
        autoscaleMin=0.8,
        autoscaleMax=1.5,
        startFrame = 1,
        endFrame = 9
    )
    sys.exit( rc )
Example #5
        timeLimit = 60
        interval = 5
        pingCmd = 'ping %s -U -D -c %s -w %f -i %f > %s' \
            % (targetHost, nPings, timeLimit, interval, self.frameOutFileName(frameNum))
        return pingCmd


# configure logger formatting
#logging.basicConfig()
logger = logging.getLogger(__name__)
logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
logDateFmt = '%Y/%m/%d %H:%M:%S'
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )
logging.basicConfig(format=logFmt, datefmt=logDateFmt)

dateTimeTag = datetime.datetime.now().strftime( '%Y-%m-%d_%H%M%S' )
outDataDir = 'data/ping_' + dateTimeTag

rc = batchRunner.runBatch(
    frameProcessor = pingFrameProcessor(),
    authToken = os.getenv('NCS_AUTH_TOKEN') or 'YourAuthTokenHere',
    timeLimit = 720,  # seconds
    frameTimeLimit = 120,
    outDataDir = outDataDir,
    autoscaleMax = 1.5,
    startFrame = 1,
    endFrame = 6
 )

sys.exit( rc )
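To make the string formatting in Example #5 concrete, here is how the per-frame command expands, using hypothetical values for the elided targetHost and nPings (the output file name is also assumed):

# illustrative only; targetHost, nPings, and the output file name are hypothetical
targetHost = 'example.com'
nPings = 10
timeLimit = 60
interval = 5
outFileName = 'frame_1.out'

pingCmd = 'ping %s -U -D -c %s -w %f -i %f > %s' \
    % (targetHost, nPings, timeLimit, interval, outFileName)
print(pingCmd)
# prints: ping example.com -U -D -c 10 -w 60.000000 -i 5.000000 > frame_1.out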
Example #6
except forwarderHost:
    logger.warning( 'could not get public ip addr of this host')
if not forwarderHost:
    logger.error( 'forwarderHost not set')
    exit(1)
'''

try:
    # call runBatch to launch worker instances and install the geth client on them
    rc = batchRunner.runBatch(
        frameProcessor=gethFrameProcessor(),
        recruitOnly=True,
        pushDeviceLocs=False,
        commonInFilePath='netconfig',
        authToken=os.getenv('NCS_AUTH_TOKEN') or 'YourAuthTokenHere',
        encryptFiles=False,
        timeLimit=60 * 60,
        instTimeLimit=20 * 60,
        filter=
        '{"dpr": ">=51", "ram:": ">=4000000000", "storage": ">=20000000000"}',
        outDataDir=outDataDir,
        nWorkers=48)
    if rc == 0:
        #portRangeStart=7100
        launchedJsonFilePath = outDataDir + '/recruitLaunched.json'
        launchedInstances = []
        # get details of launched instances from the json file
        #TODO should get list of instances with good install, rather than all started instances
        with open(launchedJsonFilePath, 'r') as jsonInFile:
            try:
                launchedInstances = json.load(jsonInFile)  # an array
Example #7
        sys.exit(1)

    nSpiders = len(spiderNames)
    nWorkers = args.nWorkers if args.nWorkers else 0

    authToken = args.authToken or os.getenv('NCS_AUTH_TOKEN')
    if not authToken:
        logger.error(
            'please provide --authToken or set env var NCS_AUTH_TOKEN')
        sys.exit(1)

    #sys.exit( 'DEBUGGING' )

    rc = batchRunner.runBatch(
        frameProcessor=dlProcessor(),
        commonInFilePath=dlProcessor.scrapyProjPath,
        authToken=authToken,
        filter=args.filter,
        timeLimit=args.timeLimit,
        frameTimeLimit=args.unitTimeLimit,
        outDataDir=outDataDir,
        nWorkers=nWorkers,
        autoscaleInit=1.33,
        autoscaleMin=1.33,
        autoscaleMax=2.0,
        startFrame=0,
        endFrame=nSpiders - 1,
    )

    sys.exit(rc)
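Example #7 reads several command-line options that the visible fragment does not define; a plausible argparse setup for them could look like this (the option names come from the fragment, while the help text and defaults are hypothetical):

import argparse

# hedged reconstruction of the options this example accesses via args
ap = argparse.ArgumentParser(description='run one scrapy spider per frame using batchRunner')
ap.add_argument('--authToken', help='NCS auth token (falls back to the NCS_AUTH_TOKEN env var)')
ap.add_argument('--filter', help='instance-attribute filter, as a JSON string')
ap.add_argument('--nWorkers', type=int, default=0, help='number of worker instances to request')
ap.add_argument('--timeLimit', type=int, default=60 * 60, help='overall batch time limit, in seconds')
ap.add_argument('--unitTimeLimit', type=int, default=10 * 60, help='per-frame time limit, in seconds')
args = ap.parse_args()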
Example #8
    monProc = None
    if perfMonHost:
        perfmonOutFilePath = os.path.join(outDataDir, 'perfmonOut.csv')
        monProc = subprocess.Popen([
            sys.executable, 'perfmonClient.py', '--outFilePath',
            perfmonOutFilePath, '--pmHost', perfMonHost, '--pmPort',
            str(4444)
        ])
    rc = batchRunner.runBatch(
        frameProcessor=JMeterFrameProcessor(),
        commonInFilePath=JMeterFrameProcessor.JMeterFilePath,
        authToken=args.authToken or os.getenv('NCS_AUTH_TOKEN')
        or 'YourAuthTokenHere',
        encryptFiles=False,
        timeLimit=frameTimeLimit + 40 * 60,
        instTimeLimit=6 * 60,
        frameTimeLimit=frameTimeLimit,
        filter=args.filter,
        outDataDir=outDataDir,
        startFrame=1,
        endFrame=nFrames,
        nWorkers=nWorkersToLaunch,
        limitOneFramePerWorker=True,
        autoscaleMax=2)
    if monProc:
        monProc.terminate()
    if (rc == 0) and os.path.isfile(outDataDir + '/recruitLaunched.json'):
        rampStepDuration = args.rampStepDuration
        SLODuration = args.SLODuration
        SLOResponseTimeMax = args.SLOResponseTimeMax

        rc2 = subprocess.call([
Example #9
else:
    logger.error( 'this version requires an agentDirPath' )
    sys.exit( 1 )
logger.debug( 'agentDirPath: %s', agentDirPath )


try:
    # call runBatch to launch worker instances and install the load generator agent on them
    rc = batchRunner.runBatch(
        frameProcessor = loadzillaFrameProcessor(),
        recruitOnly=True,
        pushDeviceLocs=False,
        commonInFilePath = loadzillaFrameProcessor.agentDirPath,
        authToken = args.authToken or os.getenv('NCS_AUTH_TOKEN'),
        cookie = args.cookie,
        sshClientKeyName=args.sshClientKeyName,
        encryptFiles=False,
        timeLimit = 12*60,
        instTimeLimit = 6*60,
        filter = args.filter,
        outDataDir = outDataDir,
        nWorkers = args.nAgents
    )
    if rc == 0:
        launchedJsonFilePath = outDataDir +'/recruitLaunched.json'
        launchedInstances = []
        # get details of launched instances from the json file
        with open( launchedJsonFilePath, 'r') as jsonInFile:
            try:
                launchedInstances = json.load(jsonInFile)  # an array
            except Exception as exc:
Example #10
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt)
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
#batchRunner.logger.setLevel(logging.DEBUG)  # for more verbosity

dateTimeTag = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
outDataDir = 'data/loadtest_' + dateTimeTag

try:
    rc = batchRunner.runBatch(
        frameProcessor=locustFrameProcessor(),
        commonInFilePath='locustWorker',
        authToken=os.getenv('NCS_AUTH_TOKEN') or 'YourAuthTokenHere',
        encryptFiles=False,
        timeLimit=14 * 60,
        frameTimeLimit=240,
        filter=
        '{"dpr": ">=48", "ram":">=2800000000", "app-version": ">=2.1.11"}',
        outDataDir=outDataDir,
        startFrame=1,
        endFrame=5,
        nWorkers=6,
        limitOneFramePerWorker=True,
        autoscaleMax=2)
    if os.path.isfile(outDataDir + '/recruitLaunched.json'):
        rc2 = subprocess.call([
            sys.executable, 'plotLocustOutput.py', '--dataDirPath', outDataDir
        ],
                              stdout=subprocess.DEVNULL)
        if rc2:
            logger.warning('plotLocustOutput.py exited with returnCode %d',
                           rc2)
Example #11
            sys.exit( 1 )
    logger.debug( 'nlAgent contents: %s', os.listdir(nlAgentDirName) )

    if nlWebWanted and not os.path.isfile( 'nlAgent/nlweb.properties'):
        logger.error( 'the file nlAgent/nlweb.properties was not found')
        sys.exit(1)
    try:
        # call runBatch to launch worker instances and install the load generator agent on them
        rc = batchRunner.runBatch(
            frameProcessor = neoloadFrameProcessor(),
            recruitOnly=True,
            pushDeviceLocs=False,
            commonInFilePath = 'nlAgent',
            authToken = authToken,
            cookie = args.cookie,
            sshClientKeyName=args.sshClientKeyName,
            encryptFiles=False,
            timeLimit = 60*60,
            instTimeLimit = instTimeLimit,
            filter = args.filter,
            outDataDir = outDataDir,
            nWorkers = args.nWorkers
        )
        if rc == 0:
            # get iids of instances successfully installed
            recruiterJlogFilePath = os.path.join( outDataDir, 'recruitInstances.jlog' )
            recruitedIids = []
            if os.path.isfile( recruiterJlogFilePath ):
                recruiterResults = readJLog( recruiterJlogFilePath )
                if not recruiterResults:
                    logger.warning( 'no entries in %s', recruiterJlogFilePath )
Example #12
gatlingVersion = '3.6.1'

dateTimeTag = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
outDataDir = 'data/gatling_' + dateTimeTag

try:
    rc = batchRunner.runBatch(
        frameProcessor=gatlingFrameProcessor(),
        commonInFilePath='gatlingWorker',
        pushDeviceLocs=True,
        authToken=os.getenv('NCS_AUTH_TOKEN') or 'YourAuthTokenHere',
        encryptFiles=False,
        timeLimit=89 * 60,
        instTimeLimit=12 * 60,
        frameTimeLimit=15 * 60,
        filter=
        '{ "regions": ["usa", "india"], "dar": ">= 99", "dpr": ">=48", "ram": ">=3800000000", "storage": ">=2000000000" }',
        outDataDir=outDataDir,
        limitOneFramePerWorker=True,
        autoscaleMax=2,
        startFrame=1,
        endFrame=6,
        nWorkers=10)
    if os.path.isfile(outDataDir + '/recruitLaunched.json'):
        # plot output (requires matplotlib)
        rc2 = subprocess.call([
            sys.executable, 'plotGatlingOutput.py', '--dataDirPath', outDataDir
        ],
                              stdout=subprocess.DEVNULL)
        if rc2:
Example #13
if os.path.isfile( outDataDir+'/batchRunner_results.jlog') \
    or os.path.isfile( outDataDir+'/recruitLaunched.json'):
    logger.error('please use a different outDataDir for each run')
    sys.exit(1)

try:
    rc = batchRunner.runBatch(
        frameProcessor=JMeterFrameProcessor(),
        commonInFilePath=workerDirPath,
        authToken=args.authToken or os.getenv('NCS_AUTH_TOKEN')
        or 'YourAuthTokenHere',
        cookie=args.cookie,
        encryptFiles=False,
        timeLimit=frameTimeLimit + 40 * 60,
        instTimeLimit=6 * 60,
        frameTimeLimit=frameTimeLimit,
        filter=args.filter,
        #filter = '{ "regions": ["usa", "india"], "dar": "==100", "dpr": ">=48", "ram": ">=3800000000", "storage": ">=2000000000" }',
        #filter = '{ "regions": ["usa", "india"], "dar": "==100", "dpr": ">=48", "ram": ">=2800000000", "app-version": ">=2.1.11" }',
        outDataDir=outDataDir,
        startFrame=1,
        endFrame=nFrames,
        nWorkers=nWorkers,
        limitOneFramePerWorker=True,
        autoscaleMax=1)
    if (rc == 0) and os.path.isfile(outDataDir + '/recruitLaunched.json'):
        rampStepDuration = args.rampStepDuration
        SLODuration = args.SLODuration
        SLOResponseTimeMax = args.SLOResponseTimeMax

        rc2 = subprocess.call([
Example #14
    logging.basicConfig(format=logFmt, datefmt=logDateFmt)
    logger.setLevel(logging.INFO)
    #batchRunner.logger.setLevel(logging.DEBUG)  # for more verbosity

    dateTimeTag = datetime.datetime.now().strftime( '%Y-%m-%d_%H%M%S' )
    outDataDirPath = 'data/spin_' + dateTimeTag

 
    rc = batchRunner.runBatch(
        frameProcessor = blenderFrameProcessor(),
        commonInFilePath = blenderFrameProcessor.blendFilePath,
        authToken = os.getenv( 'NCS_AUTH_TOKEN' ) or 'YourAuthTokenHere',
        timeLimit = 4*3600,
        instTimeLimit = 1200,
        frameTimeLimit = 2100,
        autoscaleInit = 2,
        autoscaleMin = 1.5,
        autoscaleMax = 3,
        filter = '{"dpr": ">=48","ram:":">=2800000000","app-version": ">=2.1.11"}',
        outDataDir = outDataDirPath,
        encryptFiles = False,
        startFrame = 0,
        endFrame = 5
    )
    # this part is "extra credit" if you want to encode the output as video (and ffmpeg is installed)
    if rc == 0:
        import subprocess
        def encodeTo264( destDirPath, destFileName, frameRate, kbps=30000,
                frameFileType='png', startFrame=0 ):
            '''encode frames to an h.264 video; only works if you have ffmpeg installed'''
            kbpsParam = str(kbps)+'k'
            cmd = [ 'ffmpeg', '-y', '-framerate', str(frameRate),
Example #15
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt)
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
#batchRunner.logger.setLevel(logging.DEBUG)  # for more verbosity

dateTimeTag = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
outDataDir = 'data/puppeteer_' + dateTimeTag

try:
    rc = batchRunner.runBatch(
        frameProcessor=PuppeteerLighthouseFrameProcessor(),
        commonInFilePath=PuppeteerLighthouseFrameProcessor.PuppeteerFilePath,
        authToken=os.getenv('NCS_AUTH_TOKEN') or 'YourAuthTokenHere',
        encryptFiles=False,
        timeLimit=80 * 60,
        instTimeLimit=24 * 60,
        frameTimeLimit=600,
        filter=
        '{ "regions": ["usa", "india"], "dar": ">= 99", "dpr": ">=48", "ram": ">=3800000000", "storage": ">=2000000000" }',
        outDataDir=outDataDir,
        startFrame=1,
        endFrame=5,
        nWorkers=10,
        limitOneFramePerWorker=True,
        autoscaleMax=2)
    if rc == 0 and os.path.isfile(outDataDir + '/recruitLaunched.json'):
        untarResults(outDataDir)
        rc2 = subprocess.call([
            sys.executable, 'processPuppeteerOutput.py', '--dataDirPath',
            outDataDir
        ],
                              stdout=subprocess.DEVNULL)
        if rc2:
Example #16
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
#batchRunner.logger.setLevel(logging.DEBUG)  # for more verbosity

dateTimeTag = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
outDataDir = 'data/imageUpload_' + dateTimeTag

try:
    rc = batchRunner.runBatch(
        frameProcessor=JMeterFrameProcessor(),
        commonInFilePath=JMeterFrameProcessor.workerDirPath,
        authToken=os.getenv('NCS_AUTH_TOKEN') or 'YourAuthTokenHere',
        encryptFiles=False,
        timeLimit=60 * 60,
        instTimeLimit=12 * 60,
        frameTimeLimit=14 * 60,
        filter=
        '{ "regions": ["usa", "india"], "dar": "==100", "dpr": ">=48", "ram": ">=3800000000", "storage": ">=2000000000" }',
        #filter = '{ "regions": ["usa", "india"], "dar": "==100", "dpr": ">=48", "ram:": ">=5800000000", "app-version": ">=2.1.11" }',
        outDataDir=outDataDir,
        startFrame=1,
        endFrame=6,
        nWorkers=10,
        limitOneFramePerWorker=True,
        autoscaleMax=2)
    if (rc == 0) and os.path.isfile(outDataDir + '/recruitLaunched.json'):
        rampStepDuration = 60
        SLODuration = 240
        SLOResponseTimeMax = 2.5

        rc2 = subprocess.call([
            sys.executable, 'plotJMeterOutput.py', '--dataDirPath', outDataDir,
Example #17
    if not urlList:
        logger.warning('no valid urls in the url file')
        sys.exit(1)

    processor = dlProcessor()

    os.makedirs(outDataDir, exist_ok=True)
    indexFilePath = os.path.join(outDataDir, 'index.csv')
    with open(indexFilePath, 'w') as indexFile:
        print('fileName', 'url', sep=',', file=indexFile)
        for ii, url in enumerate(urlList):
            fileName = processor.frameOutFileName(ii)
            #print( 'file', fileName, 'url', url )
            print(fileName, url, sep=',', file=indexFile)
    #sys.exit( 'DEBUGGING' )
    rc = batchRunner.runBatch(frameProcessor=processor,
                              authToken=args.authToken
                              or os.getenv('NCS_AUTH_TOKEN'),
                              filter=args.filter,
                              timeLimit=args.timeLimit,
                              frameTimeLimit=args.unitTimeLimit,
                              outDataDir=outDataDir,
                              nWorkers=args.nWorkers,
                              autoscaleInit=1.25,
                              autoscaleMax=3.0,
                              startFrame=0,
                              endFrame=len(urlList) - 1)

    sys.exit(rc)
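Example #17 writes an index.csv that pairs each frame's output file name with the URL assigned to that frame; after a run, that index can be read back to match downloaded results to their URLs, for example:

import csv
import os

# read the fileName,url index written by the script above;
# the outDataDir value here is a placeholder for the directory used in your run
outDataDir = 'data/dl_2023-01-01_000000'
indexFilePath = os.path.join(outDataDir, 'index.csv')
with open(indexFilePath, newline='') as indexFile:
    for row in csv.DictReader(indexFile):
        print(row['fileName'], row['url'])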