Example #1
def regressContech(inputs, numthreads, benchmarks):

    # os.environ.has_key() is Python 2 only; "in" works on both 2 and 3.
    if "NAS_HOME" in os.environ:
        NAS_HOME = os.environ["NAS_HOME"]
    else:
        print(">Error: Could not find NAS installation. Set NAS_HOME to the root of your NAS directory.")
        exit(1)

    # Rebuild benchmarks
    # NAS must be built sequentially
    compileJobIds = []
    for input in inputs:
        for b in benchmarks:
            x = compilationTimeCompare(b, input)
            compileJobIds.append(x)
            util.waitForJobs(x)
    time.sleep(10)  # Wait for files to be copied back
    buildRoot = scrape_build.processAll(
        [util.getFileNameForJob(j) for j in compileJobIds])

    # Run the benchmarks
    os.environ["TIME"] = '{"real":%e, "user":%U, "sys":%S, "mem":%M }'
    for input in inputs:
        runJobIds = []
        runJobIds.extend(
            [statsRun(b, numthreads, input, "contech") for b in benchmarks])
        runJobIds.extend([nativeRun(b, numthreads, input) for b in benchmarks])
        util.waitForJobs(runJobIds)
        root = buildRoot + scrape_run.processAll(
            [util.getFileNameForJob(j) for j in runJobIds])

        # Aggregate output
        table = aggregate_scrape_run.aggregate(root)
        aggregate_scrape_run.computeSlowdown(table)
        aggregate_scrape_run.generateCsv(table, "results-{}.csv".format(input))
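
Setting the TIME environment variable works because GNU time(1) uses it as its default output format string, so each timed run emits a single JSON-shaped line (%e elapsed real seconds, %U user CPU, %S system CPU, %M peak resident set size in KB) that the scrape step can pick apart. A minimal parsing sketch, assuming a captured line in exactly that shape; the sample values are made up:

import json

# Hypothetical output line, as GNU time would emit it with the format above.
line = '{"real":12.34, "user":11.02, "sys":0.45, "mem":204800 }'
stats = json.loads(line)
print("real: {}s, peak RSS: {} KB".format(stats["real"], stats["mem"]))
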
Example #2
def regressContech(inputs, numthreads, benchmarks):
 
    # os.environ.has_key() is Python 2 only; "in" works on both 2 and 3.
    if "NAS_HOME" in os.environ:
        NAS_HOME = os.environ["NAS_HOME"]
    else:
        print(">Error: Could not find NAS installation. Set NAS_HOME to the root of your NAS directory.")
        exit(1)

    # Rebuild benchmarks
    # NAS must be built sequentially
    compileJobIds = []
    for input in inputs:
        for b in benchmarks:
            x = compilationTimeCompare(b, input)
            compileJobIds.append(x)
            util.waitForJobs(x)
    time.sleep(1)  # Wait for files to be copied back
    buildRoot = scrape_build.processAll([util.getFileNameForJob(j) for j in compileJobIds])
    
    # Run the benchmarks
    os.environ["TIME"] = '{"real":%e, "user":%U, "sys":%S, "mem":%M }'
    for input in inputs:
        runJobIds = []
        runJobIds.extend([statsRun(b, numthreads, input, "contech") for b in benchmarks])
        runJobIds.extend([nativeRun(b, numthreads, input) for b in benchmarks])
        util.waitForJobs(runJobIds)
        root = buildRoot + scrape_run.processAll([util.getFileNameForJob(j) for j in runJobIds])
    
        # Aggregate output
        table = aggregate_scrape_run.aggregate(root)
        aggregate_scrape_run.computeSlowdown(table)
        aggregate_scrape_run.generateCsv(table, "results-{}.csv".format(input))
Example #3
def regressContech(inputs, numthreads, benchmarks, ro):

    # Rebuild benchmarks unless ro (run-only) is set.
    # Initialize buildRoot so it is defined when the rebuild is skipped
    # (assuming processAll returns a list, since it is concatenated below).
    buildRoot = []
    if not ro:
        compileJobIds = [compilationTimeCompare(b) for b in benchmarks]
        buildRoot = scrape_build.processAll(compileJobIds)

    # Run the benchmarks
    os.environ["TIME"] = '{"real":%e, "user":%U, "sys":%S, "mem":%M }'
    for input in inputs:
        runJobIds = []
        runJobIds.extend(
            [statsRun(b, numthreads, input, "contech") for b in benchmarks])
        runJobIds.extend([nativeRun(b, numthreads, input) for b in benchmarks])
        root = buildRoot + scrape_run.processAll(runJobIds)

        # Aggregate output
        table = aggregate_scrape_run.aggregate(root)
        aggregate_scrape_run.computeSlowdown(table)
        aggregate_scrape_run.generateCsv(table, "results-{}.csv".format(input))
Example #4
def regressContech(inputs, numthreads, benchmarks):

    # Rebuild benchmarks
    compileJobIds = [compilationTimeCompare(b) for b in benchmarks]
    util.waitForJobs(compileJobIds)
    buildRoot = scrape_build.processAll([util.getFileNameForJob(j) for j in compileJobIds])
    
    # Run the benchmarks
    os.environ["TIME"] = '{"real":%e, "user":%U, "sys":%S, "mem":%M }'
    for input in inputs:
        runJobIds = []
        runJobIds.extend([statsRun(b, numthreads, input, "contech") for b in benchmarks])
        runJobIds.extend([nativeRun(b, numthreads, input) for b in benchmarks])
        util.waitForJobs(runJobIds)
        root = buildRoot + scrape_run.processAll([util.getFileNameForJob(j) for j in runJobIds])
    
        # Aggregate output
        table = aggregate_scrape_run.aggregate(root)
        aggregate_scrape_run.computeSlowdown(table)
        aggregate_scrape_run.generateCsv(table, "results-{}.csv".format(input))
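
For completeness, a hypothetical driver for the Example #4 variant. The kernel names are the standard NAS Parallel Benchmarks codes, but the input classes, thread count, and lowercase spelling are illustrative assumptions, not taken from the source:

if __name__ == "__main__":
    # Illustrative values only: the class letters and kernel names are
    # assumptions about what compilationTimeCompare/statsRun expect.
    regressContech(inputs=["A", "B"],
                   numthreads=16,
                   benchmarks=["bt", "cg", "ep", "ft", "is", "lu", "mg", "sp"])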